file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs | ,
sockaddr_in,
sockaddr_in6,
sockaddr_un,
sa_family_t,
};
pub use self::multicast::{
ip_mreq,
ipv6_mreq,
};
pub use self::consts::*;
pub use libc::sockaddr_storage;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(i32)]
pub enum SockType {
Stream = consts::SOCK_STREAM,
Datagram = consts::SOCK_DGRAM,
SeqPacket = consts::SOCK_SEQPACKET,
Raw = consts::SOCK_RAW,
Rdm = consts::SOCK_RDM,
}
// Extra flags - Supported by Linux 2.6.27, normalized on other platforms
bitflags!(
pub struct SockFlag: c_int {
const SOCK_NONBLOCK = 0o0004000;
const SOCK_CLOEXEC = 0o2000000;
}
);
/// Copy the in-memory representation of src into the byte slice dst,
/// updating the slice to point to the remainder of dst only. Unsafe
/// because it exposes all bytes in src, which may be UB if some of them
/// are uninitialized (including padding).
unsafe fn copy_bytes<'a, 'b, T:?Sized>(src: &T, dst: &'a mut &'b mut [u8]) {
let srclen = mem::size_of_val(src);
let mut tmpdst = &mut [][..];
mem::swap(&mut tmpdst, dst);
let (target, mut remainder) = tmpdst.split_at_mut(srclen);
// Safe because the mutable borrow of dst guarantees that src does not alias it.
ptr::copy_nonoverlapping(src as *const T as *const u8, target.as_mut_ptr(), srclen);
mem::swap(dst, &mut remainder);
}
use self::ffi::{cmsghdr, msghdr, type_of_cmsg_len, type_of_cmsg_data};
/// A structure used to make room in a cmsghdr passed to recvmsg. The
/// size and alignment match that of a cmsghdr followed by a T, but the
/// fields are not accessible, as the actual types will change on a call
/// to recvmsg.
///
/// To make room for multiple messages, nest the type parameter with
/// tuples, e.g.
/// `let cmsg: CmsgSpace<([RawFd; 3], CmsgSpace<[RawFd; 2]>)> = CmsgSpace::new();`
pub struct CmsgSpace<T> {
_hdr: cmsghdr,
_data: T,
}
impl<T> CmsgSpace<T> {
/// Create a CmsgSpace<T>. The structure is used only for space, so
/// the fields are uninitialized.
pub fn new() -> Self {
// Safe because the fields themselves aren't accessible.
unsafe { mem::uninitialized() }
}
}
pub struct RecvMsg<'a> {
// The number of bytes received.
pub bytes: usize,
cmsg_buffer: &'a [u8],
pub address: Option<SockAddr>,
pub flags: MsgFlags,
}
impl<'a> RecvMsg<'a> {
/// Iterate over the valid control messages pointed to by this
/// msghdr.
pub fn cmsgs(&self) -> CmsgIterator {
CmsgIterator(self.cmsg_buffer)
}
}
pub struct CmsgIterator<'a>(&'a [u8]);
impl<'a> Iterator for CmsgIterator<'a> {
type Item = ControlMessage<'a>;
// The implementation loosely follows CMSG_FIRSTHDR / CMSG_NXTHDR,
// although we handle the invariants in slightly different places to
// get a better iterator interface.
fn next(&mut self) -> Option<ControlMessage<'a>> {
let buf = self.0;
let sizeof_cmsghdr = mem::size_of::<cmsghdr>();
if buf.len() < sizeof_cmsghdr {
return None;
}
let cmsg: &cmsghdr = unsafe { mem::transmute(buf.as_ptr()) };
// This check is only in the glibc implementation of CMSG_NXTHDR
// (although it claims the kernel header checks this), but such
// a structure is clearly invalid, either way.
let cmsg_len = cmsg.cmsg_len as usize;
if cmsg_len < sizeof_cmsghdr {
return None;
}
let len = cmsg_len - sizeof_cmsghdr;
// Advance our internal pointer.
if cmsg_align(cmsg_len) > buf.len() {
return None;
}
self.0 = &buf[cmsg_align(cmsg_len)..];
match (cmsg.cmsg_level, cmsg.cmsg_type) {
(SOL_SOCKET, SCM_RIGHTS) => unsafe {
Some(ControlMessage::ScmRights(
slice::from_raw_parts(
&cmsg.cmsg_data as *const _ as *const _, 1)))
},
(_, _) => unsafe {
Some(ControlMessage::Unknown(UnknownCmsg(
&cmsg,
slice::from_raw_parts(
&cmsg.cmsg_data as *const _ as *const _,
len))))
}
}
}
}
/// A type-safe wrapper around a single control message. More types may
/// be added to this enum; do not exhaustively pattern-match it.
/// [Further reading](http://man7.org/linux/man-pages/man3/cmsg.3.html)
pub enum ControlMessage<'a> {
/// A message of type SCM_RIGHTS, containing an array of file
/// descriptors passed between processes. See the description in the
/// "Ancillary messages" section of the
/// [unix(7) man page](http://man7.org/linux/man-pages/man7/unix.7.html).
ScmRights(&'a [RawFd]),
#[doc(hidden)]
Unknown(UnknownCmsg<'a>),
}
// An opaque structure used to prevent cmsghdr from being a public type
#[doc(hidden)]
pub struct UnknownCmsg<'a>(&'a cmsghdr, &'a [u8]);
fn cmsg_align(len: usize) -> usize {
let align_bytes = mem::size_of::<type_of_cmsg_data>() - 1;
(len + align_bytes) &!align_bytes
}
impl<'a> ControlMessage<'a> {
/// The value of CMSG_SPACE on this message.
fn space(&self) -> usize {
cmsg_align(self.len())
}
/// The value of CMSG_LEN on this message.
fn len(&self) -> usize {
cmsg_align(mem::size_of::<cmsghdr>()) + match *self {
ControlMessage::ScmRights(fds) => {
mem::size_of_val(fds)
},
ControlMessage::Unknown(UnknownCmsg(_, bytes)) => {
mem::size_of_val(bytes)
}
}
}
// Unsafe: start and end of buffer must be size_t-aligned (that is,
// cmsg_align'd). Updates the provided slice; panics if the buffer
// is too small.
unsafe fn encode_into<'b>(&self, buf: &mut &'b mut [u8]) {
match *self {
ControlMessage::ScmRights(fds) => { | cmsg_len: self.len() as type_of_cmsg_len,
cmsg_level: SOL_SOCKET,
cmsg_type: SCM_RIGHTS,
cmsg_data: [],
};
copy_bytes(&cmsg, buf);
let padlen = cmsg_align(mem::size_of_val(&cmsg)) -
mem::size_of_val(&cmsg);
let mut tmpbuf = &mut [][..];
mem::swap(&mut tmpbuf, buf);
let (_padding, mut remainder) = tmpbuf.split_at_mut(padlen);
mem::swap(buf, &mut remainder);
copy_bytes(fds, buf);
},
ControlMessage::Unknown(UnknownCmsg(orig_cmsg, bytes)) => {
copy_bytes(orig_cmsg, buf);
copy_bytes(bytes, buf);
}
}
}
}
/// Send data in scatter-gather vectors to a socket, possibly accompanied
/// by ancillary data. Optionally direct the message at the given address,
/// as with sendto.
///
/// Allocates if cmsgs is nonempty.
pub fn sendmsg<'a>(fd: RawFd, iov: &[IoVec<&'a [u8]>], cmsgs: &[ControlMessage<'a>], flags: MsgFlags, addr: Option<&'a SockAddr>) -> Result<usize> {
let mut len = 0;
let mut capacity = 0;
for cmsg in cmsgs {
len += cmsg.len();
capacity += cmsg.space();
}
// Note that the resulting vector claims to have length == capacity,
// so it's presently uninitialized.
let mut cmsg_buffer = unsafe {
let mut vec = Vec::<u8>::with_capacity(len);
vec.set_len(len);
vec
};
{
let mut ptr = &mut cmsg_buffer[..];
for cmsg in cmsgs {
unsafe { cmsg.encode_into(&mut ptr) };
}
}
let (name, namelen) = match addr {
Some(addr) => { let (x, y) = unsafe { addr.as_ffi_pair() }; (x as *const _, y) }
None => (0 as *const _, 0),
};
let cmsg_ptr = if capacity > 0 {
cmsg_buffer.as_ptr() as *const c_void
} else {
ptr::null()
};
let mhdr = msghdr {
msg_name: name as *const c_void,
msg_namelen: namelen,
msg_iov: iov.as_ptr(),
msg_iovlen: iov.len() as size_t,
msg_control: cmsg_ptr,
msg_controllen: capacity as size_t,
msg_flags: 0,
};
let ret = unsafe { ffi::sendmsg(fd, &mhdr, flags.bits()) };
Errno::result(ret).map(|r| r as usize)
}
/// Receive message in scatter-gather vectors from a socket, and
/// optionally receive ancillary data into the provided buffer.
/// If no ancillary data is desired, use () as the type parameter.
pub fn recvmsg<'a, T>(fd: RawFd, iov: &[IoVec<&mut [u8]>], cmsg_buffer: Option<&'a mut CmsgSpace<T>>, flags: MsgFlags) -> Result<RecvMsg<'a>> {
let mut address: sockaddr_storage = unsafe { mem::uninitialized() };
let (msg_control, msg_controllen) = match cmsg_buffer {
Some(cmsg_buffer) => (cmsg_buffer as *mut _, mem::size_of_val(cmsg_buffer)),
None => (0 as *mut _, 0),
};
let mut mhdr = msghdr {
msg_name: &mut address as *const _ as *const c_void,
msg_namelen: mem::size_of::<sockaddr_storage>() as socklen_t,
msg_iov: iov.as_ptr() as *const IoVec<&[u8]>, // safe cast to add const-ness
msg_iovlen: iov.len() as size_t,
msg_control: msg_control as *const c_void,
msg_controllen: msg_controllen as size_t,
msg_flags: 0,
};
let ret = unsafe { ffi::recvmsg(fd, &mut mhdr, flags.bits()) };
Ok(unsafe { RecvMsg {
bytes: try!(Errno::result(ret)) as usize,
cmsg_buffer: slice::from_raw_parts(mhdr.msg_control as *const u8,
mhdr.msg_controllen as usize),
address: sockaddr_storage_to_addr(&address,
mhdr.msg_namelen as usize).ok(),
flags: MsgFlags::from_bits_truncate(mhdr.msg_flags),
} })
}
/// Create an endpoint for communication
///
/// [Further reading](http://man7.org/linux/man-pages/man2/socket.2.html)
pub fn socket(domain: AddressFamily, ty: SockType, flags: SockFlag, protocol: c_int) -> Result<RawFd> {
let mut ty = ty as c_int;
let feat_atomic = features::socket_atomic_cloexec();
if feat_atomic {
ty = ty | flags.bits();
}
// TODO: Check the kernel version
let res = try!(Errno::result(unsafe { ffi::socket(domain as c_int, ty, protocol) }));
if!feat_atomic {
if flags.contains(SOCK_CLOEXEC) {
try!(fcntl(res, F_SETFD(FD_CLOEXEC)));
}
if flags.contains(SOCK_NONBLOCK) {
try!(fcntl(res, F_SETFL(O_NONBLOCK)));
}
}
Ok(res)
}
/// Create a pair of connected sockets
///
/// [Further reading](http://man7.org/linux/man-pages/man2/socketpair.2.html)
pub fn socketpair(domain: AddressFamily, ty: SockType, protocol: c_int,
flags: SockFlag) -> Result<(RawFd, RawFd)> {
let mut ty = ty as c_int;
let feat_atomic = features::socket_atomic_cloexec();
if feat_atomic {
ty = ty | flags.bits();
}
let mut fds = [-1, -1];
let res = unsafe {
ffi::socketpair(domain as c_int, ty, protocol, fds.as_mut_ptr())
};
try!(Errno::result(res));
if!feat_atomic {
if flags.contains(SOCK_CLOEXEC) {
try!(fcntl(fds[0], F_SETFD(FD_CLOEXEC)));
try!(fcntl(fds[1], F_SETFD(FD_CLOEXEC)));
}
if flags.contains(SOCK_NONBLOCK) {
try!(fcntl(fds[0], F_SETFL(O_NONBLOCK)));
try!(fcntl(fds[1], F_SETFL(O_NONBLOCK)));
}
}
Ok((fds[0], fds[1]))
}
/// Listen for connections on a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/listen.2.html)
pub fn listen(sockfd: RawFd, backlog: usize) -> Result<()> {
let res = unsafe { ffi::listen(sockfd, backlog as c_int) };
Errno::result(res).map(drop)
}
/// Bind a name to a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/bind.2.html)
#[cfg(not(all(target_os="android", target_pointer_width="64")))]
pub fn bind(fd: RawFd, addr: &SockAddr) -> Result<()> {
let res = unsafe {
let (ptr, len) = addr.as_ffi_pair();
ffi::bind(fd, ptr, len)
};
Errno::result(res).map(drop)
}
/// Bind a name to a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/bind.2.html)
// Android has some weirdness. Its 64-bit bind takes a c_int instead of a
// socklen_t
#[cfg(all(target_os="android", target_pointer_width="64"))]
pub fn bind(fd: RawFd, addr: &SockAddr) -> Result<()> {
let res = unsafe {
let (ptr, len) = addr.as_ffi_pair();
ffi::bind(fd, ptr, len as c_int)
};
Errno::result(res).map(drop)
}
/// Accept a connection on a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/accept.2.html)
pub fn accept(sockfd: RawFd) -> Result<RawFd> {
let res = unsafe { ffi::accept(sockfd, ptr::null_mut(), ptr::null_mut()) };
Errno::result(res)
}
/// Accept a connection on a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/accept.2.html)
pub fn accept4(sockfd: RawFd, flags: SockFlag) -> Result<RawFd> {
accept4_polyfill(sockfd, flags)
}
#[inline]
fn accept4_polyfill(sockfd: RawFd, flags: SockFlag) -> Result<RawFd> {
let res = try!(Errno::result(unsafe { ffi::accept(sockfd, ptr::null_mut(), ptr::null_mut()) }));
if flags.contains(SOCK_CLOEXEC) {
try!(fcntl(res, F_SETFD(FD_CLOEXEC)));
}
if flags.contains(SOCK_NONBLOCK) {
try!(fcntl(res, F_SETFL(O_NONBLOCK)));
}
Ok(res)
}
/// Initiate a connection on a socket
///
/// [Further reading](http://man7.org/linux/man-pages/man2/connect.2.html)
pub fn connect(fd: RawFd, addr: &SockAddr) -> Result<()> {
let res = unsafe {
let (ptr, len) = addr.as_ffi_pair();
ffi::connect(fd, ptr, len)
};
Errno::result(res).map(drop)
}
/// Receive data from a connection-oriented socket. Returns the number of
/// bytes read
///
/// [Further reading](http://man7.org/linux/man-pages/man2/recv.2.html)
pub fn recv(sockfd: RawFd, buf: &mut [u8], flags: MsgFlags) -> Result<usize> {
unsafe {
let ret = ffi::recv(
sockfd,
buf.as_ptr() as *mut c_void,
buf.len() as size_t,
flags.bits());
Errno::result(ret).map(|r| r as usize)
}
}
/// Receive data from a connectionless or connection-oriented socket. Returns
/// the number of bytes read and the socket address of the sender.
///
/// [Further reading](http://man7.org/linux/man-pages/man2/recvmsg.2.html)
pub fn recvfrom(sockfd: RawFd, buf: &mut [u8]) -> Result<(usize, SockAddr)> {
unsafe {
let addr: sockaddr_storage = mem::zeroed();
let mut len = mem::size_of::<sockaddr_storage>() as socklen_t;
let ret = try!(Errno::result(ffi::recvfrom(
sockfd,
buf.as_ptr() as *mut c_void,
buf.len() as size_t,
0,
mem::transmute(&addr),
&mut len as *mut socklen_t)));
sockaddr_storage_to_addr(&addr, len as usize)
.map(|addr| (ret as usize, addr))
}
}
pub fn sendto(fd: RawFd, buf: &[u8], addr: &SockAddr, flags: MsgFlags) -> Result<usize> {
let ret = unsafe {
let (ptr, len) = addr.as_ffi_pair();
ffi::sendto(fd, buf.as_ptr() as *const c_void, buf.len() as size_t, flags.bits(), ptr, len)
};
Errno::result(ret).map(|r| r as usize)
}
/// Send data to a connection-oriented socket. Returns the number of bytes read
///
/// [Further reading](http://man7.org/linux/man-pages/man2/send.2.html)
pub fn send(fd: RawFd, buf: &[u8], flags: MsgFlags) -> Result<usize> {
let ret = unsafe {
ffi::send(fd, buf.as_ptr() as *const c_void, buf.len() as size_t, flags.bits())
};
Errno::result(ret).map(|r| r as usize)
}
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct linger {
pub l_onoff: c_int,
pub l_linger: c_int
}
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct ucred {
pid: pid_t,
uid: uid_t,
gid: gid_t,
}
/*
*
* ===== Socket Options =====
*
*/
/// The protocol level at which to get / set socket options. Used as an
/// argument to `getsockopt` and `setsockopt`.
///
/// [Further reading](http://man7.org/linux/man-pages/man2/setsockopt.2.html)
#[repr(i32)]
pub enum SockLevel {
Socket = SOL_SOCKET,
Tcp = IPPROTO_TCP,
Ip = IPPROTO_IP,
Ipv6 = IPPROTO_IPV6,
Udp = IPPROTO_UDP,
#[cfg(any(target_os = "linux", target_os = "android"))]
Netlink = SOL_NETLINK,
}
/// Represents a socket option that can be accessed or set. Used as an argument
/// to `getsockopt`
pub trait GetSockOpt : Copy {
type Val;
#[doc(hidden)]
fn get(&self, fd: RawFd) -> Result<Self::Val>;
}
/// Represents a socket option that can be accessed or set. Used as an argument
/// to `setsockopt`
pub trait SetSockOpt : Copy {
type Val;
#[doc(hidden)]
fn set(&self, fd: RawFd, val: &Self::Val) -> Result<()>;
}
/// Get the current value for the requested socket option
///
/// [Further reading](http://man7.org/linux/man-pages/man2/getsockopt.2.html)
pub fn getsockopt<O: GetSockOpt>(fd: RawFd, opt: O) -> Result<O::Val> {
opt.get(fd)
}
/// Sets the value for the requested socket option
///
/// [Further reading](http://man7.org/linux/man-pages/man2/setsockopt.2.html)
pub fn setsockopt<O: SetSockOpt>(fd: RawFd, opt: O, val: &O::Val) -> Result<()> {
opt.set(fd, val)
}
/// Get the address of the peer connected to the socket `fd`.
///
/// [Further reading](http://man7.org/linux/man-pages/man2/getpeername.2.html)
pub fn getpeername(fd: RawFd) -> Result<SockAddr> {
unsafe {
let addr: sockaddr_storage = mem::uninitialized();
let mut len = mem::size_of::<sockaddr_storage>() as socklen_t;
let ret = ffi::getpeername(fd, mem::transmute(&addr), &mut len);
try!(Errno::result(ret));
sockaddr_storage_to_addr(&addr, len as usize)
}
}
/// Get the current address to which the socket `fd` is bound.
///
/// [Further reading](http://man7.org/linux/man-pages/man2/getsockname.2.html)
pub fn getsockname(fd: RawFd) -> Result<SockAddr> {
unsafe {
let addr: sockaddr_storage = mem::uninitialized();
let mut len = mem::size_of::<sockaddr_storage>() as socklen_t;
let ret = ffi::getsockname(fd, mem::transmute(&addr), &mut len);
try!(Errno::result(ret));
sockaddr_storage_to_addr(&addr, len as usize)
}
}
/// Return the appropriate SockAddr type from a `sockaddr_storage` of a certain
/// size. In C this would usually be done by casting. The `len` argument
/// should be the number of bytes in the sockaddr_storage that are actually
/// allocated and valid. It must be at least as large as all the useful parts
/// of the structure. Note that in the case of a `sockaddr_un`, `len` need not
/// include the terminating null.
pub unsafe fn sockaddr_storage_to_addr(
addr: &sockaddr_storage,
len: usize) -> Result<SockAddr> {
if len < mem::size_of_val(&addr.ss_family) {
return Err(Error::Sys(Errno::ENOTCONN));
}
match addr.ss_family as c_int {
consts::AF_INET => {
assert!(len as usize == mem::size_of::<sockaddr_in>());
| let cmsg = cmsghdr { | random_line_split |
multi-byte-chars.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
#![feature(non_ascii_idents)]
// This test checks whether debuginfo generation can handle multi-byte UTF-8
// characters at the end of a block. There's no need to do anything in the
// debugger -- just make sure that the compiler doesn't crash.
// See also issue #18791.
struct C { θ: u8 }
fn main() { |
let x = C { θ: 0 };
(|c: C| c.θ )(x);
}
| identifier_body |
|
multi-byte-chars.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
#![feature(non_ascii_idents)]
| // See also issue #18791.
struct C { θ: u8 }
fn main() {
let x = C { θ: 0 };
(|c: C| c.θ )(x);
} | // This test checks whether debuginfo generation can handle multi-byte UTF-8
// characters at the end of a block. There's no need to do anything in the
// debugger -- just make sure that the compiler doesn't crash. | random_line_split |
multi-byte-chars.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
#![feature(non_ascii_idents)]
// This test checks whether debuginfo generation can handle multi-byte UTF-8
// characters at the end of a block. There's no need to do anything in the
// debugger -- just make sure that the compiler doesn't crash.
// See also issue #18791.
struct C { θ: u8 }
fn m | ) {
let x = C { θ: 0 };
(|c: C| c.θ )(x);
}
| ain( | identifier_name |
htmlheadingelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLHeadingElementBinding;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[derive(JSTraceable, MallocSizeOf)]
pub enum HeadingLevel {
Heading1,
Heading2,
Heading3,
Heading4,
Heading5,
Heading6,
}
#[dom_struct]
pub struct HTMLHeadingElement {
htmlelement: HTMLElement,
level: HeadingLevel,
}
impl HTMLHeadingElement {
fn | (
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
level: HeadingLevel,
) -> HTMLHeadingElement {
HTMLHeadingElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
level: level,
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
level: HeadingLevel,
) -> DomRoot<HTMLHeadingElement> {
Node::reflect_node(
Box::new(HTMLHeadingElement::new_inherited(
local_name, prefix, document, level,
)),
document,
HTMLHeadingElementBinding::Wrap,
)
}
}
| new_inherited | identifier_name |
htmlheadingelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLHeadingElementBinding;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document; | #[derive(JSTraceable, MallocSizeOf)]
pub enum HeadingLevel {
Heading1,
Heading2,
Heading3,
Heading4,
Heading5,
Heading6,
}
#[dom_struct]
pub struct HTMLHeadingElement {
htmlelement: HTMLElement,
level: HeadingLevel,
}
impl HTMLHeadingElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
level: HeadingLevel,
) -> HTMLHeadingElement {
HTMLHeadingElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
level: level,
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
level: HeadingLevel,
) -> DomRoot<HTMLHeadingElement> {
Node::reflect_node(
Box::new(HTMLHeadingElement::new_inherited(
local_name, prefix, document, level,
)),
document,
HTMLHeadingElementBinding::Wrap,
)
}
} | use crate::dom::htmlelement::HTMLElement;
use crate::dom::node::Node;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
| random_line_split |
mod.rs | use na::{DMatrix};
use std::collections::{HashMap, HashSet};
use std::num::*;
use itertools::Itertools;
use std::cell::RefCell;
use std::rc::Rc;
use petgraph::{Graph};
use petgraph::graph::NodeIndex;
use petgraph::algo::*;
use rand;
pub fn do_stuff() | }
}
| {
let d = 7; //dimensions
let bin_width = 0.2; //bin size (assuming bins are squares
let bin_range_start = -100.0; //bin values cover -100
let bin_range_end = 100.0; //to 100
let bin_range = rand::distributions::Range::new(bin_range_start, bin_range_end);
let gap_time = 5; //time steps to run before adjusting clustering
let c_m = 3.0;
let c_l = 0.8;
let lambda = 0.998;
let beta = 0.3;
let mut rng = rand::thread_rng();
for t in 0..11 {
let rand_vec = rand::sample(&mut rng, -100..100, d);
println!("{:?}", rand_vec);
| identifier_body |
mod.rs | use na::{DMatrix};
use std::collections::{HashMap, HashSet};
use std::num::*;
use itertools::Itertools;
use std::cell::RefCell;
use std::rc::Rc;
use petgraph::{Graph};
use petgraph::graph::NodeIndex;
use petgraph::algo::*;
use rand;
pub fn do_stuff() {
let d = 7; //dimensions
let bin_width = 0.2; //bin size (assuming bins are squares
let bin_range_start = -100.0; //bin values cover -100
let bin_range_end = 100.0; //to 100
let bin_range = rand::distributions::Range::new(bin_range_start, bin_range_end);
let gap_time = 5; //time steps to run before adjusting clustering
let c_m = 3.0; | let mut rng = rand::thread_rng();
for t in 0..11 {
let rand_vec = rand::sample(&mut rng, -100..100, d);
println!("{:?}", rand_vec);
}
} | let c_l = 0.8;
let lambda = 0.998;
let beta = 0.3;
| random_line_split |
mod.rs | use na::{DMatrix};
use std::collections::{HashMap, HashSet};
use std::num::*;
use itertools::Itertools;
use std::cell::RefCell;
use std::rc::Rc;
use petgraph::{Graph};
use petgraph::graph::NodeIndex;
use petgraph::algo::*;
use rand;
pub fn | () {
let d = 7; //dimensions
let bin_width = 0.2; //bin size (assuming bins are squares
let bin_range_start = -100.0; //bin values cover -100
let bin_range_end = 100.0; //to 100
let bin_range = rand::distributions::Range::new(bin_range_start, bin_range_end);
let gap_time = 5; //time steps to run before adjusting clustering
let c_m = 3.0;
let c_l = 0.8;
let lambda = 0.998;
let beta = 0.3;
let mut rng = rand::thread_rng();
for t in 0..11 {
let rand_vec = rand::sample(&mut rng, -100..100, d);
println!("{:?}", rand_vec);
}
}
| do_stuff | identifier_name |
lub-if.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we correctly consider the type of `match` to be the LUB
// of the various arms, particularly in the case where regions are
// involved.
pub fn opt_str0<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_none() {
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str1<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_some() {
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
} | "(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str3<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_some() { //~ ERROR mismatched types
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
fn main() {} | }
pub fn opt_str2<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_none() { //~ ERROR mismatched types | random_line_split |
lub-if.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we correctly consider the type of `match` to be the LUB
// of the various arms, particularly in the case where regions are
// involved.
pub fn opt_str0<'a>(maybestr: &'a Option<~str>) -> &'a str |
pub fn opt_str1<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_some() {
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
pub fn opt_str2<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_none() { //~ ERROR mismatched types
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str3<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_some() { //~ ERROR mismatched types
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
fn main() {}
| {
if maybestr.is_none() {
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
} | identifier_body |
lub-if.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we correctly consider the type of `match` to be the LUB
// of the various arms, particularly in the case where regions are
// involved.
pub fn opt_str0<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_none() {
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str1<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_some() {
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
pub fn opt_str2<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_none() { //~ ERROR mismatched types
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str3<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_some() { //~ ERROR mismatched types
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
fn | () {}
| main | identifier_name |
lub-if.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we correctly consider the type of `match` to be the LUB
// of the various arms, particularly in the case where regions are
// involved.
pub fn opt_str0<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_none() | else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str1<'a>(maybestr: &'a Option<~str>) -> &'a str {
if maybestr.is_some() {
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
pub fn opt_str2<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_none() { //~ ERROR mismatched types
"(none)"
} else {
let s: &'a str = *maybestr.get_ref();
s
}
}
pub fn opt_str3<'a>(maybestr: &'a Option<~str>) -> &'static str {
if maybestr.is_some() { //~ ERROR mismatched types
let s: &'a str = *maybestr.get_ref();
s
} else {
"(none)"
}
}
fn main() {}
| {
"(none)"
} | conditional_block |
logfwd.rs | use libc::c_char;
use std::sync::Once;
use crate::raw;
/// Manually initialize logging.
///
/// It is optional to call this function, and safe to do so more than once.
pub fn | () {
static mut INIT: Once = Once::new();
unsafe {
INIT.call_once(|| {
init_impl();
});
}
}
#[cfg(feature = "log")]
fn init_impl() {
use log::LevelFilter;
// trace to our callback
unsafe {
raw::rs_trace_to(trace);
}
// determine log level
// this is useful because if the setted level is not Debug we can optimize librsync log
// calls
let level = match log::max_level() {
LevelFilter::Info => raw::RS_LOG_NOTICE,
LevelFilter::Debug | LevelFilter::Trace => raw::RS_LOG_DEBUG,
_ => raw::RS_LOG_WARNING,
};
unsafe {
raw::rs_trace_set_level(level);
}
}
#[cfg(feature = "log")]
extern "C" fn trace(level: raw::rs_loglevel, msg: *const c_char) {
use log::Level;
use std::ffi::CStr;
let level = match level {
raw::RS_LOG_EMERG | raw::RS_LOG_ALERT | raw::RS_LOG_CRIT | raw::RS_LOG_ERR => Level::Error,
raw::RS_LOG_WARNING => Level::Warn,
raw::RS_LOG_NOTICE | raw::RS_LOG_INFO => Level::Info,
raw::RS_LOG_DEBUG => Level::Debug,
_ => Level::Error,
};
let msg = unsafe { CStr::from_ptr(msg).to_string_lossy() };
log!(target: "librsync", level, "{}", msg);
}
#[cfg(not(feature = "log"))]
fn init_impl() {
unsafe {
raw::rs_trace_to(trace);
raw::rs_trace_set_level(raw::RS_LOG_EMERG);
}
extern "C" fn trace(_level: raw::rs_loglevel, _msg: *const c_char) {}
}
| init | identifier_name |
logfwd.rs | use libc::c_char;
use std::sync::Once;
use crate::raw; | /// Manually initialize logging.
///
/// It is optional to call this function, and safe to do so more than once.
pub fn init() {
static mut INIT: Once = Once::new();
unsafe {
INIT.call_once(|| {
init_impl();
});
}
}
#[cfg(feature = "log")]
fn init_impl() {
use log::LevelFilter;
// trace to our callback
unsafe {
raw::rs_trace_to(trace);
}
// determine log level
// this is useful because if the setted level is not Debug we can optimize librsync log
// calls
let level = match log::max_level() {
LevelFilter::Info => raw::RS_LOG_NOTICE,
LevelFilter::Debug | LevelFilter::Trace => raw::RS_LOG_DEBUG,
_ => raw::RS_LOG_WARNING,
};
unsafe {
raw::rs_trace_set_level(level);
}
}
#[cfg(feature = "log")]
extern "C" fn trace(level: raw::rs_loglevel, msg: *const c_char) {
use log::Level;
use std::ffi::CStr;
let level = match level {
raw::RS_LOG_EMERG | raw::RS_LOG_ALERT | raw::RS_LOG_CRIT | raw::RS_LOG_ERR => Level::Error,
raw::RS_LOG_WARNING => Level::Warn,
raw::RS_LOG_NOTICE | raw::RS_LOG_INFO => Level::Info,
raw::RS_LOG_DEBUG => Level::Debug,
_ => Level::Error,
};
let msg = unsafe { CStr::from_ptr(msg).to_string_lossy() };
log!(target: "librsync", level, "{}", msg);
}
#[cfg(not(feature = "log"))]
fn init_impl() {
unsafe {
raw::rs_trace_to(trace);
raw::rs_trace_set_level(raw::RS_LOG_EMERG);
}
extern "C" fn trace(_level: raw::rs_loglevel, _msg: *const c_char) {}
} | random_line_split |
|
logfwd.rs | use libc::c_char;
use std::sync::Once;
use crate::raw;
/// Manually initialize logging.
///
/// It is optional to call this function, and safe to do so more than once.
pub fn init() {
static mut INIT: Once = Once::new();
unsafe {
INIT.call_once(|| {
init_impl();
});
}
}
#[cfg(feature = "log")]
fn init_impl() {
use log::LevelFilter;
// trace to our callback
unsafe {
raw::rs_trace_to(trace);
}
// determine log level
// this is useful because if the setted level is not Debug we can optimize librsync log
// calls
let level = match log::max_level() {
LevelFilter::Info => raw::RS_LOG_NOTICE,
LevelFilter::Debug | LevelFilter::Trace => raw::RS_LOG_DEBUG,
_ => raw::RS_LOG_WARNING,
};
unsafe {
raw::rs_trace_set_level(level);
}
}
#[cfg(feature = "log")]
extern "C" fn trace(level: raw::rs_loglevel, msg: *const c_char) {
use log::Level;
use std::ffi::CStr;
let level = match level {
raw::RS_LOG_EMERG | raw::RS_LOG_ALERT | raw::RS_LOG_CRIT | raw::RS_LOG_ERR => Level::Error,
raw::RS_LOG_WARNING => Level::Warn,
raw::RS_LOG_NOTICE | raw::RS_LOG_INFO => Level::Info,
raw::RS_LOG_DEBUG => Level::Debug,
_ => Level::Error,
};
let msg = unsafe { CStr::from_ptr(msg).to_string_lossy() };
log!(target: "librsync", level, "{}", msg);
}
#[cfg(not(feature = "log"))]
fn init_impl() {
unsafe {
raw::rs_trace_to(trace);
raw::rs_trace_set_level(raw::RS_LOG_EMERG);
}
extern "C" fn trace(_level: raw::rs_loglevel, _msg: *const c_char) |
}
| {} | identifier_body |
gigasecond.rs | use time::PrimitiveDateTime as DateTime;
/// Create a datetime from the given numeric point in time.
///
/// Panics if any field is invalid.
fn dt(year: i32, month: u8, day: u8, hour: u8, minute: u8, second: u8) -> DateTime {
use time::{Date, Time};
DateTime::new(
Date::from_calendar_date(year, month.try_into().unwrap(), day).unwrap(),
Time::from_hms(hour, minute, second).unwrap(),
)
}
#[test]
fn test_date() {
let start_date = dt(2011, 4, 25, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2043, 1, 1, 1, 46, 40));
}
#[test]
#[ignore]
fn test_another_date() {
let start_date = dt(1977, 6, 13, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2009, 2, 19, 1, 46, 40));
}
#[test]
#[ignore]
fn test_third_date() {
let start_date = dt(1959, 7, 19, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(1991, 3, 27, 1, 46, 40));
}
#[test]
#[ignore]
fn test_datetime() {
let start_date = dt(2015, 1, 24, 22, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 2, 23, 46, 40));
}
#[test]
#[ignore]
fn test_another_datetime() | {
let start_date = dt(2015, 1, 24, 23, 59, 59);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 3, 1, 46, 39));
} | identifier_body |
|
gigasecond.rs | use time::PrimitiveDateTime as DateTime;
/// Create a datetime from the given numeric point in time.
///
/// Panics if any field is invalid.
fn dt(year: i32, month: u8, day: u8, hour: u8, minute: u8, second: u8) -> DateTime {
use time::{Date, Time};
DateTime::new(
Date::from_calendar_date(year, month.try_into().unwrap(), day).unwrap(),
Time::from_hms(hour, minute, second).unwrap(),
)
}
#[test]
fn test_date() {
let start_date = dt(2011, 4, 25, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2043, 1, 1, 1, 46, 40));
}
#[test]
#[ignore] | let start_date = dt(1977, 6, 13, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2009, 2, 19, 1, 46, 40));
}
#[test]
#[ignore]
fn test_third_date() {
let start_date = dt(1959, 7, 19, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(1991, 3, 27, 1, 46, 40));
}
#[test]
#[ignore]
fn test_datetime() {
let start_date = dt(2015, 1, 24, 22, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 2, 23, 46, 40));
}
#[test]
#[ignore]
fn test_another_datetime() {
let start_date = dt(2015, 1, 24, 23, 59, 59);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 3, 1, 46, 39));
} | fn test_another_date() { | random_line_split |
gigasecond.rs | use time::PrimitiveDateTime as DateTime;
/// Create a datetime from the given numeric point in time.
///
/// Panics if any field is invalid.
fn dt(year: i32, month: u8, day: u8, hour: u8, minute: u8, second: u8) -> DateTime {
use time::{Date, Time};
DateTime::new(
Date::from_calendar_date(year, month.try_into().unwrap(), day).unwrap(),
Time::from_hms(hour, minute, second).unwrap(),
)
}
#[test]
fn test_date() {
let start_date = dt(2011, 4, 25, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2043, 1, 1, 1, 46, 40));
}
#[test]
#[ignore]
fn | () {
let start_date = dt(1977, 6, 13, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2009, 2, 19, 1, 46, 40));
}
#[test]
#[ignore]
fn test_third_date() {
let start_date = dt(1959, 7, 19, 0, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(1991, 3, 27, 1, 46, 40));
}
#[test]
#[ignore]
fn test_datetime() {
let start_date = dt(2015, 1, 24, 22, 0, 0);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 2, 23, 46, 40));
}
#[test]
#[ignore]
fn test_another_datetime() {
let start_date = dt(2015, 1, 24, 23, 59, 59);
assert_eq!(gigasecond::after(start_date), dt(2046, 10, 3, 1, 46, 39));
}
| test_another_date | identifier_name |
toast.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{components::font_awesome_outline, extensions::NodeExt, generated::css_classes::C};
use seed::{prelude::*, *};
#[derive(Clone, Copy, Debug)]
pub enum | {
Close,
}
pub enum Model {
Success(String),
Warn(String),
Error(String),
}
pub fn view(model: &Model) -> Node<Msg> {
let (toast_bg, toast_status_bg, icon, status_txt, x) = match model {
Model::Success(x) => (C.bg_green_600, C.bg_green_500, "check-circle", "Success", x),
Model::Warn(x) => (C.bg_yellow_600, C.bg_yellow_500, "bell", "Warning", x),
Model::Error(x) => (C.bg_red_600, C.bg_red_500, "bell", "Error", x),
};
div![
class![
C.text_white,
C.fade_in,
C.p_2,
toast_bg,
C.items_center,
C.leading_none,
C.rounded_full,
C.flex,
C.inline_flex,
],
span![
class![
C.flex,
C.items_center,
C.rounded_full,
toast_status_bg,
C.px_2,
C.py_1,
C.text_xs,
C.font_bold,
C.mr_3,
],
font_awesome_outline(class![C.h_4, C.w_4, C.mr_1, C.inline], icon),
status_txt,
],
span![class![C.font_semibold, C.mr_2, C.text_left, C.flex_auto], x],
font_awesome_outline(class![C.h_4, C.w_4, C.ml_1, C.inline, C.cursor_pointer], "times-circle")
.with_listener(simple_ev(Ev::Click, Msg::Close))
]
}
| Msg | identifier_name |
toast.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{components::font_awesome_outline, extensions::NodeExt, generated::css_classes::C};
use seed::{prelude::*, *};
#[derive(Clone, Copy, Debug)]
pub enum Msg {
Close,
}
pub enum Model {
Success(String),
Warn(String),
Error(String),
}
pub fn view(model: &Model) -> Node<Msg> {
let (toast_bg, toast_status_bg, icon, status_txt, x) = match model {
Model::Success(x) => (C.bg_green_600, C.bg_green_500, "check-circle", "Success", x),
Model::Warn(x) => (C.bg_yellow_600, C.bg_yellow_500, "bell", "Warning", x),
Model::Error(x) => (C.bg_red_600, C.bg_red_500, "bell", "Error", x),
};
div![
class![
C.text_white,
C.fade_in,
C.p_2,
toast_bg,
C.items_center,
C.leading_none, | class![
C.flex,
C.items_center,
C.rounded_full,
toast_status_bg,
C.px_2,
C.py_1,
C.text_xs,
C.font_bold,
C.mr_3,
],
font_awesome_outline(class![C.h_4, C.w_4, C.mr_1, C.inline], icon),
status_txt,
],
span![class![C.font_semibold, C.mr_2, C.text_left, C.flex_auto], x],
font_awesome_outline(class![C.h_4, C.w_4, C.ml_1, C.inline, C.cursor_pointer], "times-circle")
.with_listener(simple_ev(Ev::Click, Msg::Close))
]
} | C.rounded_full,
C.flex,
C.inline_flex,
],
span![ | random_line_split |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use flow::{PreorderFlowTraversal, self};
use gfx::display_list::OpaqueNode;
use incremental::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT, RestyleDamage};
use std::mem;
use style::context::StyleContext;
use style::matching::MatchMethods;
use style::traversal::{DomTraversalContext, STYLE_BLOOM};
use style::traversal::{put_thread_local_bloom_filter, recalc_style_at};
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, ServoLayoutNode, ThreadSafeLayoutNode};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
impl<'lc, 'ln> DomTraversalContext<ServoLayoutNode<'ln>> for RecalcStyleAndConstructFlows<'lc> {
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: ServoLayoutNode<'ln>) { recalc_style_at(&self.context, self.root, node); }
fn process_postorder(&self, node: ServoLayoutNode<'ln>) { construct_flows_at(&self.context, self.root, node); }
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode) -> bool;
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}", | tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
if self.should_process() {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
self.state.pop_stacking_context_id();
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
} | random_line_split |
|
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use flow::{PreorderFlowTraversal, self};
use gfx::display_list::OpaqueNode;
use incremental::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT, RestyleDamage};
use std::mem;
use style::context::StyleContext;
use style::matching::MatchMethods;
use style::traversal::{DomTraversalContext, STYLE_BLOOM};
use style::traversal::{put_thread_local_bloom_filter, recalc_style_at};
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, ServoLayoutNode, ThreadSafeLayoutNode};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
impl<'lc, 'ln> DomTraversalContext<ServoLayoutNode<'ln>> for RecalcStyleAndConstructFlows<'lc> {
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn | <'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: ServoLayoutNode<'ln>) { recalc_style_at(&self.context, self.root, node); }
fn process_postorder(&self, node: ServoLayoutNode<'ln>) { construct_flows_at(&self.context, self.root, node); }
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode) -> bool;
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
if self.should_process() {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
self.state.pop_stacking_context_id();
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
| new | identifier_name |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use flow::{PreorderFlowTraversal, self};
use gfx::display_list::OpaqueNode;
use incremental::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT, RestyleDamage};
use std::mem;
use style::context::StyleContext;
use style::matching::MatchMethods;
use style::traversal::{DomTraversalContext, STYLE_BLOOM};
use style::traversal::{put_thread_local_bloom_filter, recalc_style_at};
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, ServoLayoutNode, ThreadSafeLayoutNode};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
impl<'lc, 'ln> DomTraversalContext<ServoLayoutNode<'ln>> for RecalcStyleAndConstructFlows<'lc> {
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: ServoLayoutNode<'ln>) { recalc_style_at(&self.context, self.root, node); }
fn process_postorder(&self, node: ServoLayoutNode<'ln>) { construct_flows_at(&self.context, self.root, node); }
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode) -> bool;
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool |
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
if self.should_process() {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
self.state.pop_stacking_context_id();
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
| {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
} | identifier_body |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use flow::{PreorderFlowTraversal, self};
use gfx::display_list::OpaqueNode;
use incremental::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT, RestyleDamage};
use std::mem;
use style::context::StyleContext;
use style::matching::MatchMethods;
use style::traversal::{DomTraversalContext, STYLE_BLOOM};
use style::traversal::{put_thread_local_bloom_filter, recalc_style_at};
use util::opts;
use util::tid::tid;
use wrapper::{LayoutNode, ServoLayoutNode, ThreadSafeLayoutNode};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
impl<'lc, 'ln> DomTraversalContext<ServoLayoutNode<'ln>> for RecalcStyleAndConstructFlows<'lc> {
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: ServoLayoutNode<'ln>) { recalc_style_at(&self.context, self.root, node); }
fn process_postorder(&self, node: ServoLayoutNode<'ln>) { construct_flows_at(&self.context, self.root, node); }
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode) -> bool;
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || node.has_dirty_descendants() {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
// Reset the layout damage in this node. It's been propagated to the
// flow by the flow constructor.
tnode.set_restyle_damage(RestyleDamage::empty());
}
unsafe {
node.set_changed(false);
node.set_dirty(false);
node.set_dirty_descendants(false);
}
let unsafe_layout_node = node.to_unsafe();
let (mut bf, old_node, old_generation) =
STYLE_BLOOM.with(|style_bloom| {
mem::replace(&mut *style_bloom.borrow_mut(), None)
.expect("The bloom filter should have been set by style recalc.")
});
assert_eq!(old_node, unsafe_layout_node);
assert_eq!(old_generation, context.shared_context().generation);
match node.layout_parent_node(root) {
None => {
debug!("[{}] - {:X}, and deleting BF.", tid(), unsafe_layout_node.0);
// If this is the reflow root, eat the thread-local bloom filter.
}
Some(parent) => {
// Otherwise, put it back, but remove this node.
node.remove_from_bloom_filter(&mut *bf);
let unsafe_parent = parent.to_unsafe();
put_thread_local_bloom_filter(bf, &unsafe_parent, &context.shared_context());
},
};
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
if self.should_process() |
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
| {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
self.state.pop_stacking_context_id();
} | conditional_block |
lib.rs | extern crate diesel;
extern crate diesel_dynamic_schema;
use diesel::sql_types::*;
use diesel::*;
use diesel_dynamic_schema::{schema, table};
mod dynamic_values;
mod connection_setup;
use connection_setup::{create_user_table, establish_connection};
#[cfg(feature = "postgres")]
type Backend = diesel::pg::Pg;
#[cfg(feature = "mysql")]
type Backend = diesel::mysql::Mysql;
#[cfg(feature = "sqlite")]
type Backend = diesel::sqlite::Sqlite;
#[test]
fn querying_basic_schemas() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users(name) VALUES ('Sean')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let names = users.select(name).load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), names);
}
#[test]
fn querying_multiple_types() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let hair_color = users.column::<Nullable<Text>, _>("hair_color");
let name = users.column::<Text, _>("name");
let users = users
.select((name, hair_color))
.load::<(String, Option<String>)>(&conn);
assert_eq!(
Ok(vec![("Sean".into(), None), ("Tess".into(), None)]),
users
);
}
#[test]
fn columns_used_in_where_clause() |
#[test]
fn providing_custom_schema_name() {
let table = schema("information_schema").table("users");
let sql = debug_query::<Backend, _>(&table);
#[cfg(feature = "postgres")]
assert_eq!(
r#""information_schema"."users" -- binds: []"#,
sql.to_string()
);
#[cfg(not(feature = "postgres"))]
assert_eq!("`information_schema`.`users` -- binds: []", sql.to_string());
}
| {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let users = users
.select(name)
.filter(name.eq("Sean"))
.load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), users);
} | identifier_body |
lib.rs | extern crate diesel;
extern crate diesel_dynamic_schema;
use diesel::sql_types::*;
use diesel::*;
use diesel_dynamic_schema::{schema, table};
mod dynamic_values;
mod connection_setup;
use connection_setup::{create_user_table, establish_connection};
#[cfg(feature = "postgres")]
type Backend = diesel::pg::Pg;
#[cfg(feature = "mysql")]
type Backend = diesel::mysql::Mysql;
#[cfg(feature = "sqlite")]
type Backend = diesel::sqlite::Sqlite;
#[test]
fn | () {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users(name) VALUES ('Sean')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let names = users.select(name).load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), names);
}
#[test]
fn querying_multiple_types() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let hair_color = users.column::<Nullable<Text>, _>("hair_color");
let name = users.column::<Text, _>("name");
let users = users
.select((name, hair_color))
.load::<(String, Option<String>)>(&conn);
assert_eq!(
Ok(vec![("Sean".into(), None), ("Tess".into(), None)]),
users
);
}
#[test]
fn columns_used_in_where_clause() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let users = users
.select(name)
.filter(name.eq("Sean"))
.load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), users);
}
#[test]
fn providing_custom_schema_name() {
let table = schema("information_schema").table("users");
let sql = debug_query::<Backend, _>(&table);
#[cfg(feature = "postgres")]
assert_eq!(
r#""information_schema"."users" -- binds: []"#,
sql.to_string()
);
#[cfg(not(feature = "postgres"))]
assert_eq!("`information_schema`.`users` -- binds: []", sql.to_string());
}
| querying_basic_schemas | identifier_name |
lib.rs | extern crate diesel;
extern crate diesel_dynamic_schema; |
use diesel::sql_types::*;
use diesel::*;
use diesel_dynamic_schema::{schema, table};
mod dynamic_values;
mod connection_setup;
use connection_setup::{create_user_table, establish_connection};
#[cfg(feature = "postgres")]
type Backend = diesel::pg::Pg;
#[cfg(feature = "mysql")]
type Backend = diesel::mysql::Mysql;
#[cfg(feature = "sqlite")]
type Backend = diesel::sqlite::Sqlite;
#[test]
fn querying_basic_schemas() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users(name) VALUES ('Sean')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let names = users.select(name).load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), names);
}
#[test]
fn querying_multiple_types() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let hair_color = users.column::<Nullable<Text>, _>("hair_color");
let name = users.column::<Text, _>("name");
let users = users
.select((name, hair_color))
.load::<(String, Option<String>)>(&conn);
assert_eq!(
Ok(vec![("Sean".into(), None), ("Tess".into(), None)]),
users
);
}
#[test]
fn columns_used_in_where_clause() {
let conn = establish_connection();
create_user_table(&conn);
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')")
.execute(&conn)
.unwrap();
let users = table("users");
let name = users.column::<Text, _>("name");
let users = users
.select(name)
.filter(name.eq("Sean"))
.load::<String>(&conn);
assert_eq!(Ok(vec!["Sean".into()]), users);
}
#[test]
fn providing_custom_schema_name() {
let table = schema("information_schema").table("users");
let sql = debug_query::<Backend, _>(&table);
#[cfg(feature = "postgres")]
assert_eq!(
r#""information_schema"."users" -- binds: []"#,
sql.to_string()
);
#[cfg(not(feature = "postgres"))]
assert_eq!("`information_schema`.`users` -- binds: []", sql.to_string());
} | random_line_split |
|
lib.rs | //! Keep track of all the git repositories on your machine.
//!
//! This crate houses the binary and library for the git-global subcommand, a
//! way to find, query statuses, and gain other insights about all the git repos
//! on your machine. The binary can be installed with cargo: `cargo install
//! git-global`.
//!
//! # Command-line Usage
//!
//! ```bash
//! $ git global [status] # show `git status -s` for all your git repos
//! $ git global info # show information about git-global itself
//! $ git global list # show all git repos git-global knows about
//! $ git global scan # search your filesystem for git repos and update cache
//! ```
//!
//! # Public Interface
//!
//! The git-global project's primary goal is to produce a useful binary. There's
//! no driving force to provide a very good library for other Rust projects to
//! use, so this documentation primarily serves to illustrate how the codebase
//! is structured. (If a library use-case arises, however, that would be fine.)
//!
//! The [`Repo`] struct is a git repository that is identified by the full path
//! to its base directory (instead of, say, its `.git` directory).
//!
//! The [`Config`] struct holds a user's git-global configuration information,
//! which usually merges some default values with values in the `[global]`
//! section of the user's global `.gitconfig` file. It provides access to the
//! list of known `Repo`s via the `get_repos()` method, which reads from a cache
//! file, populating it for the first time after performing a filesystem scan,
//! if necessary.
//!
//! A [`Report`] contains messages added by a subcommand about the overall
//! results of what it did, as well as messages about the specific `Repo`s to
//! which that subcommand applies. All subcommand modules expose an `execute()`
//! function that takes ownership of a `Config` struct and returns a
//! `Result<Report>`. These subcommands live in the [`subcommands`][subcommands]
//! module.
//!
//! The [`run_from_command_line()`][rfcl] function handles running git-global
//! from the command line and serves as the entry point for the binary.
//!
//! [`Config`]: struct.Config.html
//! [`Repo`]: struct.Repo.html
//! [`Report`]: struct.Report.html
//! [rfcl]: fn.run_from_command_line.html
//! [subcommands]: subcommands/index.html
mod cli;
mod config;
mod errors;
mod repo;
mod report;
pub mod subcommands; // Using `pub mod` so we see the docs.
pub use cli::run_from_command_line; | pub use errors::{GitGlobalError, Result};
pub use repo::Repo;
pub use report::Report; | pub use config::Config; | random_line_split |
geom.rs | //! Geometry-related types.
use std::cmp::{max, min};
use std::ops::Range;
/// A rectangle.
///
/// TODO: Maybe merge with `vobsub::Coords`? Or find a third-party library
/// for this?
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Rect {
left: usize,
top: usize,
width: usize,
height: usize,
}
// Allow some of these API methods to be unused.
#[allow(dead_code)]
impl Rect {
/// Create a rectangle by specifying the left, top, width and height
/// values. Panics if the rectangles right or bottom coordinates are
/// out-of-bounds.
pub fn ltwh(l: usize, t: usize, w: usize, h: usize) -> Rect {
l.checked_add(w).expect("rectangle right is larger than usize");
t.checked_add(h).expect("rectangle bottom is larger than usize");
Rect {
left: l,
top: t,
width: w,
height: h,
}
}
/// Create a rectangle from left and top (inclusive) and right and
/// bottom (exclusive) coordinates. Panics if the rectangle
/// has negative height or width.
pub fn ltrb(l: usize, t: usize, r: usize, b: usize) -> Rect {
Rect {
left: l,
top: t,
width: r.checked_sub(l).expect("rectangle has negative width"),
height: b.checked_sub(t).expect("rectangle has negative height"),
}
}
/// The left-most edge of the rectangle (inclusive).
pub fn left(&self) -> usize {
self.left
}
/// The top-most edge of the rectangle (inclusive).
pub fn top(&self) -> usize {
self.top
}
/// The right-most edge of the rectangle (exclusive).
pub fn right(&self) -> usize {
self.left + self.width
}
/// The bottom-most edge of the rectangle (exclusive).
pub fn bottom(&self) -> usize {
self.top + self.height
}
/// The width of the rectangle.
pub fn width(&self) -> usize {
self.width
}
/// The height of the rectangle.
pub fn height(&self) -> usize |
/// Does this rectangle have area zero?
pub fn is_empty(&self) -> bool {
self.width == 0 || self.height == 0
}
/// Is the specified point in this rectangle?
pub fn contains(&self, x: usize, y: usize) -> bool {
self.left <= x && x < self.right() &&
self.top <= y && y < self.bottom()
}
/// Return a rectangle including all the area included by this
/// rectangle and another. If either rectangle has zero area, it will
/// be excluded.
pub fn union(&self, other: &Rect) -> Rect {
if other.is_empty() {
self.to_owned()
} else if self.is_empty() {
other.to_owned()
} else {
Rect::ltrb(min(self.left, other.left),
min(self.top, other.top),
max(self.right(), other.right()),
max(self.bottom(), other.bottom()))
}
}
/// Get a range from `left..right`.
pub fn horizontal_range(&self) -> Range<usize> {
self.left..self.right()
}
}
#[cfg(test)]
mod test {
use quickcheck::{Arbitrary, Gen, TestResult};
use super::*;
impl Arbitrary for Rect {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let s = g.size();
Rect {
left: g.gen_range(0, s),
top: g.gen_range(0, s),
width: g.gen_range(0, s),
height: g.gen_range(0, s),
}
}
fn shrink(&self) -> Box<Iterator<Item=Self>> {
let tuple = (self.left, self.top, self.width, self.height);
Box::new(tuple.shrink().map(|(l, t, w, h)| Rect::ltwh(l, t, w, h)))
}
}
quickcheck! {
fn rect_width_and_height_are_valid(r: Rect) -> bool {
r.width() == r.right() - r.left() &&
r.height() == r.bottom() - r.top()
}
fn rect_union_includes_all_points(r1: Rect, r2: Rect) -> bool {
let u = r1.union(&r2);
((r1.is_empty() ||
u.contains(r1.left, r1.top) &&
u.contains(r1.right()-1, r1.bottom()-1)) &&
(r2.is_empty() ||
u.contains(r2.left, r2.top) &&
u.contains(r2.right()-1, r2.bottom()-1)))
}
fn rect_union_with_zero_size_is_identity(r1: Rect, r2: Rect)
-> TestResult {
if r2.is_empty() {
TestResult::from_bool(r1.union(&r2) == r1)
} else if r1.is_empty() {
TestResult::from_bool(r1.union(&r2) == r2)
} else {
TestResult::discard()
}
}
}
}
| {
self.height
} | identifier_body |
geom.rs | //! Geometry-related types.
use std::cmp::{max, min};
use std::ops::Range;
/// A rectangle.
///
/// TODO: Maybe merge with `vobsub::Coords`? Or find a third-party library
/// for this?
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Rect {
left: usize,
top: usize,
width: usize,
height: usize,
}
// Allow some of these API methods to be unused.
#[allow(dead_code)]
impl Rect {
/// Create a rectangle by specifying the left, top, width and height
/// values. Panics if the rectangles right or bottom coordinates are
/// out-of-bounds.
pub fn ltwh(l: usize, t: usize, w: usize, h: usize) -> Rect {
l.checked_add(w).expect("rectangle right is larger than usize");
t.checked_add(h).expect("rectangle bottom is larger than usize");
Rect {
left: l,
top: t,
width: w,
height: h,
}
}
/// Create a rectangle from left and top (inclusive) and right and
/// bottom (exclusive) coordinates. Panics if the rectangle
/// has negative height or width.
pub fn ltrb(l: usize, t: usize, r: usize, b: usize) -> Rect {
Rect {
left: l,
top: t,
width: r.checked_sub(l).expect("rectangle has negative width"),
height: b.checked_sub(t).expect("rectangle has negative height"),
}
}
/// The left-most edge of the rectangle (inclusive).
pub fn left(&self) -> usize {
self.left
}
/// The top-most edge of the rectangle (inclusive).
pub fn top(&self) -> usize {
self.top
}
/// The right-most edge of the rectangle (exclusive).
pub fn right(&self) -> usize {
self.left + self.width
}
/// The bottom-most edge of the rectangle (exclusive).
pub fn bottom(&self) -> usize {
self.top + self.height
}
/// The width of the rectangle.
pub fn width(&self) -> usize {
self.width
}
/// The height of the rectangle.
pub fn height(&self) -> usize {
self.height
}
/// Does this rectangle have area zero?
pub fn | (&self) -> bool {
self.width == 0 || self.height == 0
}
/// Is the specified point in this rectangle?
pub fn contains(&self, x: usize, y: usize) -> bool {
self.left <= x && x < self.right() &&
self.top <= y && y < self.bottom()
}
/// Return a rectangle including all the area included by this
/// rectangle and another. If either rectangle has zero area, it will
/// be excluded.
pub fn union(&self, other: &Rect) -> Rect {
if other.is_empty() {
self.to_owned()
} else if self.is_empty() {
other.to_owned()
} else {
Rect::ltrb(min(self.left, other.left),
min(self.top, other.top),
max(self.right(), other.right()),
max(self.bottom(), other.bottom()))
}
}
/// Get a range from `left..right`.
pub fn horizontal_range(&self) -> Range<usize> {
self.left..self.right()
}
}
#[cfg(test)]
mod test {
use quickcheck::{Arbitrary, Gen, TestResult};
use super::*;
impl Arbitrary for Rect {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let s = g.size();
Rect {
left: g.gen_range(0, s),
top: g.gen_range(0, s),
width: g.gen_range(0, s),
height: g.gen_range(0, s),
}
}
fn shrink(&self) -> Box<Iterator<Item=Self>> {
let tuple = (self.left, self.top, self.width, self.height);
Box::new(tuple.shrink().map(|(l, t, w, h)| Rect::ltwh(l, t, w, h)))
}
}
quickcheck! {
fn rect_width_and_height_are_valid(r: Rect) -> bool {
r.width() == r.right() - r.left() &&
r.height() == r.bottom() - r.top()
}
fn rect_union_includes_all_points(r1: Rect, r2: Rect) -> bool {
let u = r1.union(&r2);
((r1.is_empty() ||
u.contains(r1.left, r1.top) &&
u.contains(r1.right()-1, r1.bottom()-1)) &&
(r2.is_empty() ||
u.contains(r2.left, r2.top) &&
u.contains(r2.right()-1, r2.bottom()-1)))
}
fn rect_union_with_zero_size_is_identity(r1: Rect, r2: Rect)
-> TestResult {
if r2.is_empty() {
TestResult::from_bool(r1.union(&r2) == r1)
} else if r1.is_empty() {
TestResult::from_bool(r1.union(&r2) == r2)
} else {
TestResult::discard()
}
}
}
}
| is_empty | identifier_name |
geom.rs | //! Geometry-related types.
use std::cmp::{max, min};
use std::ops::Range;
/// A rectangle.
///
/// TODO: Maybe merge with `vobsub::Coords`? Or find a third-party library
/// for this?
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Rect {
left: usize,
top: usize,
width: usize,
height: usize,
}
// Allow some of these API methods to be unused.
#[allow(dead_code)]
impl Rect {
/// Create a rectangle by specifying the left, top, width and height
/// values. Panics if the rectangles right or bottom coordinates are
/// out-of-bounds.
pub fn ltwh(l: usize, t: usize, w: usize, h: usize) -> Rect {
l.checked_add(w).expect("rectangle right is larger than usize");
t.checked_add(h).expect("rectangle bottom is larger than usize");
Rect {
left: l,
top: t,
width: w,
height: h,
}
}
/// Create a rectangle from left and top (inclusive) and right and
/// bottom (exclusive) coordinates. Panics if the rectangle
/// has negative height or width.
pub fn ltrb(l: usize, t: usize, r: usize, b: usize) -> Rect { | width: r.checked_sub(l).expect("rectangle has negative width"),
height: b.checked_sub(t).expect("rectangle has negative height"),
}
}
/// The left-most edge of the rectangle (inclusive).
pub fn left(&self) -> usize {
self.left
}
/// The top-most edge of the rectangle (inclusive).
pub fn top(&self) -> usize {
self.top
}
/// The right-most edge of the rectangle (exclusive).
pub fn right(&self) -> usize {
self.left + self.width
}
/// The bottom-most edge of the rectangle (exclusive).
pub fn bottom(&self) -> usize {
self.top + self.height
}
/// The width of the rectangle.
pub fn width(&self) -> usize {
self.width
}
/// The height of the rectangle.
pub fn height(&self) -> usize {
self.height
}
/// Does this rectangle have area zero?
pub fn is_empty(&self) -> bool {
self.width == 0 || self.height == 0
}
/// Is the specified point in this rectangle?
pub fn contains(&self, x: usize, y: usize) -> bool {
self.left <= x && x < self.right() &&
self.top <= y && y < self.bottom()
}
/// Return a rectangle including all the area included by this
/// rectangle and another. If either rectangle has zero area, it will
/// be excluded.
pub fn union(&self, other: &Rect) -> Rect {
if other.is_empty() {
self.to_owned()
} else if self.is_empty() {
other.to_owned()
} else {
Rect::ltrb(min(self.left, other.left),
min(self.top, other.top),
max(self.right(), other.right()),
max(self.bottom(), other.bottom()))
}
}
/// Get a range from `left..right`.
pub fn horizontal_range(&self) -> Range<usize> {
self.left..self.right()
}
}
#[cfg(test)]
mod test {
use quickcheck::{Arbitrary, Gen, TestResult};
use super::*;
impl Arbitrary for Rect {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let s = g.size();
Rect {
left: g.gen_range(0, s),
top: g.gen_range(0, s),
width: g.gen_range(0, s),
height: g.gen_range(0, s),
}
}
fn shrink(&self) -> Box<Iterator<Item=Self>> {
let tuple = (self.left, self.top, self.width, self.height);
Box::new(tuple.shrink().map(|(l, t, w, h)| Rect::ltwh(l, t, w, h)))
}
}
quickcheck! {
fn rect_width_and_height_are_valid(r: Rect) -> bool {
r.width() == r.right() - r.left() &&
r.height() == r.bottom() - r.top()
}
fn rect_union_includes_all_points(r1: Rect, r2: Rect) -> bool {
let u = r1.union(&r2);
((r1.is_empty() ||
u.contains(r1.left, r1.top) &&
u.contains(r1.right()-1, r1.bottom()-1)) &&
(r2.is_empty() ||
u.contains(r2.left, r2.top) &&
u.contains(r2.right()-1, r2.bottom()-1)))
}
fn rect_union_with_zero_size_is_identity(r1: Rect, r2: Rect)
-> TestResult {
if r2.is_empty() {
TestResult::from_bool(r1.union(&r2) == r1)
} else if r1.is_empty() {
TestResult::from_bool(r1.union(&r2) == r2)
} else {
TestResult::discard()
}
}
}
} | Rect {
left: l,
top: t, | random_line_split |
instr_blendpd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*; | use ::test::run_test;
#[test]
fn blendpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 219, 2], OperandSize::Dword)
}
#[test]
fn blendpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM1)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, EDI, Eight, 1375625917, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(101)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 140, 251, 189, 98, 254, 81, 101], OperandSize::Dword)
}
#[test]
fn blendpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(8)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 247, 8], OperandSize::Qword)
}
#[test]
fn blendpd_4() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RAX, RAX, Eight, 536446275, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(94)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 180, 192, 67, 133, 249, 31, 94], OperandSize::Qword)
} | use ::Operand::*;
use ::Reg::*;
use ::RegScale::*; | random_line_split |
instr_blendpd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn blendpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 219, 2], OperandSize::Dword)
}
#[test]
fn blendpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM1)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, EDI, Eight, 1375625917, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(101)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 140, 251, 189, 98, 254, 81, 101], OperandSize::Dword)
}
#[test]
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(8)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 247, 8], OperandSize::Qword)
}
#[test]
fn blendpd_4() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RAX, RAX, Eight, 536446275, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(94)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 180, 192, 67, 133, 249, 31, 94], OperandSize::Qword)
}
| blendpd_3 | identifier_name |
instr_blendpd.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn blendpd_1() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 219, 2], OperandSize::Dword)
}
#[test]
fn blendpd_2() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM1)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, EDI, Eight, 1375625917, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(101)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 140, 251, 189, 98, 254, 81, 101], OperandSize::Dword)
}
#[test]
fn blendpd_3() {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM7)), operand3: Some(Literal8(8)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 247, 8], OperandSize::Qword)
}
#[test]
fn blendpd_4() | {
run_test(&Instruction { mnemonic: Mnemonic::BLENDPD, operand1: Some(Direct(XMM6)), operand2: Some(IndirectScaledIndexedDisplaced(RAX, RAX, Eight, 536446275, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(94)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 58, 13, 180, 192, 67, 133, 249, 31, 94], OperandSize::Qword)
} | identifier_body |
|
singlebyte.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Common codec implementation for single-byte encodings.
use libtww::std::convert::Into;
use util::{as_char, StrCharIndex};
use types::*;
/// A common framework for single-byte encodings based on ASCII.
#[derive(Copy, Clone)]
pub struct SingleByteEncoding {
pub name: &'static str,
pub whatwg_name: Option<&'static str>,
pub index_forward: fn(u8) -> u16,
pub index_backward: fn(u32) -> u8,
}
impl Encoding for SingleByteEncoding {
fn name(&self) -> &'static str {
self.name
}
fn whatwg_name(&self) -> Option<&'static str> {
self.whatwg_name
}
fn raw_encoder(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn raw_decoder(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
}
/// An encoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteEncoder {
index_backward: fn(u32) -> u8,
}
impl SingleByteEncoder {
pub fn new(index_backward: fn(u32) -> u8) -> Box<RawEncoder> {
Box::new(SingleByteEncoder { index_backward: index_backward })
}
}
impl RawEncoder for SingleByteEncoder {
fn from_self(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i, j), ch) in input.index_iter() {
if ch <= '\u{7f}' {
output.write_byte(ch as u8);
continue;
} else |
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteDecoder {
index_forward: fn(u8) -> u16,
}
impl SingleByteDecoder {
pub fn new(index_forward: fn(u8) -> u16) -> Box<RawDecoder> {
Box::new(SingleByteDecoder { index_forward: index_forward })
}
}
impl RawDecoder for SingleByteDecoder {
fn from_self(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
let mut i = 0;
let len = input.len();
while i < len {
if input[i] <= 0x7f {
output.write_char(input[i] as char);
} else {
let ch = (self.index_forward)(input[i]);
if ch!= 0xffff {
output.write_char(as_char(ch as u32));
} else {
return (i,
Some(CodecError {
upto: i as isize + 1,
cause: "invalid sequence".into(),
}));
}
}
i += 1;
}
(i, None)
}
fn raw_finish(&mut self, _output: &mut StringWriter) -> Option<CodecError> {
None
}
}
/// Algorithmic mapping for ISO 8859-1.
pub mod iso_8859_1 {
#[inline]
pub fn forward(code: u8) -> u16 {
code as u16
}
#[inline]
pub fn backward(code: u32) -> u8 {
if (code &!0x7f) == 0x80 {
code as u8
} else {
0
}
}
}
#[cfg(test)]
mod tests {
use all::ISO_8859_2;
use types::*;
#[test]
fn test_encoder_non_bmp() {
let mut e = ISO_8859_2.raw_encoder();
assert_feed_err!(e, "A", "\u{FFFF}", "B", [0x41]);
assert_feed_err!(e, "A", "\u{10000}", "B", [0x41]);
}
}
| {
let index = (self.index_backward)(ch as u32);
if index != 0 {
output.write_byte(index);
} else {
return (i,
Some(CodecError {
upto: j as isize,
cause: "unrepresentable character".into(),
}));
}
} | conditional_block |
singlebyte.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Common codec implementation for single-byte encodings.
use libtww::std::convert::Into;
use util::{as_char, StrCharIndex};
use types::*;
/// A common framework for single-byte encodings based on ASCII.
#[derive(Copy, Clone)]
pub struct SingleByteEncoding {
pub name: &'static str,
pub whatwg_name: Option<&'static str>,
pub index_forward: fn(u8) -> u16,
pub index_backward: fn(u32) -> u8,
}
impl Encoding for SingleByteEncoding {
fn name(&self) -> &'static str {
self.name
}
fn whatwg_name(&self) -> Option<&'static str> {
self.whatwg_name
}
fn raw_encoder(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn raw_decoder(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
}
/// An encoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteEncoder {
index_backward: fn(u32) -> u8,
}
impl SingleByteEncoder {
pub fn new(index_backward: fn(u32) -> u8) -> Box<RawEncoder> {
Box::new(SingleByteEncoder { index_backward: index_backward })
}
}
impl RawEncoder for SingleByteEncoder {
fn from_self(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i, j), ch) in input.index_iter() {
if ch <= '\u{7f}' {
output.write_byte(ch as u8);
continue;
} else {
let index = (self.index_backward)(ch as u32);
if index!= 0 {
output.write_byte(index);
} else {
return (i,
Some(CodecError {
upto: j as isize,
cause: "unrepresentable character".into(),
}));
}
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteDecoder {
index_forward: fn(u8) -> u16,
}
impl SingleByteDecoder {
pub fn new(index_forward: fn(u8) -> u16) -> Box<RawDecoder> {
Box::new(SingleByteDecoder { index_forward: index_forward })
}
}
impl RawDecoder for SingleByteDecoder {
fn from_self(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn | (&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
let mut i = 0;
let len = input.len();
while i < len {
if input[i] <= 0x7f {
output.write_char(input[i] as char);
} else {
let ch = (self.index_forward)(input[i]);
if ch!= 0xffff {
output.write_char(as_char(ch as u32));
} else {
return (i,
Some(CodecError {
upto: i as isize + 1,
cause: "invalid sequence".into(),
}));
}
}
i += 1;
}
(i, None)
}
fn raw_finish(&mut self, _output: &mut StringWriter) -> Option<CodecError> {
None
}
}
/// Algorithmic mapping for ISO 8859-1.
pub mod iso_8859_1 {
#[inline]
pub fn forward(code: u8) -> u16 {
code as u16
}
#[inline]
pub fn backward(code: u32) -> u8 {
if (code &!0x7f) == 0x80 {
code as u8
} else {
0
}
}
}
#[cfg(test)]
mod tests {
use all::ISO_8859_2;
use types::*;
#[test]
fn test_encoder_non_bmp() {
let mut e = ISO_8859_2.raw_encoder();
assert_feed_err!(e, "A", "\u{FFFF}", "B", [0x41]);
assert_feed_err!(e, "A", "\u{10000}", "B", [0x41]);
}
}
| raw_feed | identifier_name |
singlebyte.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Common codec implementation for single-byte encodings.
use libtww::std::convert::Into;
use util::{as_char, StrCharIndex};
use types::*;
/// A common framework for single-byte encodings based on ASCII.
#[derive(Copy, Clone)]
pub struct SingleByteEncoding {
pub name: &'static str,
pub whatwg_name: Option<&'static str>,
pub index_forward: fn(u8) -> u16,
pub index_backward: fn(u32) -> u8,
}
impl Encoding for SingleByteEncoding {
fn name(&self) -> &'static str {
self.name
}
fn whatwg_name(&self) -> Option<&'static str> {
self.whatwg_name
}
fn raw_encoder(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn raw_decoder(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
}
/// An encoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteEncoder {
index_backward: fn(u32) -> u8,
}
impl SingleByteEncoder {
pub fn new(index_backward: fn(u32) -> u8) -> Box<RawEncoder> {
Box::new(SingleByteEncoder { index_backward: index_backward })
}
}
impl RawEncoder for SingleByteEncoder {
fn from_self(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i, j), ch) in input.index_iter() {
if ch <= '\u{7f}' {
output.write_byte(ch as u8);
continue;
} else {
let index = (self.index_backward)(ch as u32);
if index!= 0 {
output.write_byte(index);
} else {
return (i,
Some(CodecError {
upto: j as isize,
cause: "unrepresentable character".into(),
}));
}
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteDecoder {
index_forward: fn(u8) -> u16,
}
impl SingleByteDecoder {
pub fn new(index_forward: fn(u8) -> u16) -> Box<RawDecoder> {
Box::new(SingleByteDecoder { index_forward: index_forward })
}
}
impl RawDecoder for SingleByteDecoder {
fn from_self(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
let mut i = 0;
let len = input.len();
while i < len {
if input[i] <= 0x7f {
output.write_char(input[i] as char);
} else {
let ch = (self.index_forward)(input[i]);
if ch!= 0xffff {
output.write_char(as_char(ch as u32));
} else {
return (i,
Some(CodecError {
upto: i as isize + 1,
cause: "invalid sequence".into(),
}));
}
}
i += 1;
}
(i, None)
}
fn raw_finish(&mut self, _output: &mut StringWriter) -> Option<CodecError> {
None
}
}
/// Algorithmic mapping for ISO 8859-1.
pub mod iso_8859_1 {
#[inline]
pub fn forward(code: u8) -> u16 {
code as u16
}
#[inline]
pub fn backward(code: u32) -> u8 {
if (code &!0x7f) == 0x80 {
code as u8
} else {
0
}
}
}
#[cfg(test)]
mod tests {
use all::ISO_8859_2;
use types::*;
#[test]
fn test_encoder_non_bmp() {
let mut e = ISO_8859_2.raw_encoder();
assert_feed_err!(e, "A", "\u{FFFF}", "B", [0x41]); | }
} | assert_feed_err!(e, "A", "\u{10000}", "B", [0x41]); | random_line_split |
singlebyte.rs | // This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Common codec implementation for single-byte encodings.
use libtww::std::convert::Into;
use util::{as_char, StrCharIndex};
use types::*;
/// A common framework for single-byte encodings based on ASCII.
#[derive(Copy, Clone)]
pub struct SingleByteEncoding {
pub name: &'static str,
pub whatwg_name: Option<&'static str>,
pub index_forward: fn(u8) -> u16,
pub index_backward: fn(u32) -> u8,
}
impl Encoding for SingleByteEncoding {
fn name(&self) -> &'static str {
self.name
}
fn whatwg_name(&self) -> Option<&'static str> |
fn raw_encoder(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn raw_decoder(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
}
/// An encoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteEncoder {
index_backward: fn(u32) -> u8,
}
impl SingleByteEncoder {
pub fn new(index_backward: fn(u32) -> u8) -> Box<RawEncoder> {
Box::new(SingleByteEncoder { index_backward: index_backward })
}
}
impl RawEncoder for SingleByteEncoder {
fn from_self(&self) -> Box<RawEncoder> {
SingleByteEncoder::new(self.index_backward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i, j), ch) in input.index_iter() {
if ch <= '\u{7f}' {
output.write_byte(ch as u8);
continue;
} else {
let index = (self.index_backward)(ch as u32);
if index!= 0 {
output.write_byte(index);
} else {
return (i,
Some(CodecError {
upto: j as isize,
cause: "unrepresentable character".into(),
}));
}
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for single-byte encodings based on ASCII.
#[derive(Clone, Copy)]
pub struct SingleByteDecoder {
index_forward: fn(u8) -> u16,
}
impl SingleByteDecoder {
pub fn new(index_forward: fn(u8) -> u16) -> Box<RawDecoder> {
Box::new(SingleByteDecoder { index_forward: index_forward })
}
}
impl RawDecoder for SingleByteDecoder {
fn from_self(&self) -> Box<RawDecoder> {
SingleByteDecoder::new(self.index_forward)
}
fn is_ascii_compatible(&self) -> bool {
true
}
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
let mut i = 0;
let len = input.len();
while i < len {
if input[i] <= 0x7f {
output.write_char(input[i] as char);
} else {
let ch = (self.index_forward)(input[i]);
if ch!= 0xffff {
output.write_char(as_char(ch as u32));
} else {
return (i,
Some(CodecError {
upto: i as isize + 1,
cause: "invalid sequence".into(),
}));
}
}
i += 1;
}
(i, None)
}
fn raw_finish(&mut self, _output: &mut StringWriter) -> Option<CodecError> {
None
}
}
/// Algorithmic mapping for ISO 8859-1.
pub mod iso_8859_1 {
#[inline]
pub fn forward(code: u8) -> u16 {
code as u16
}
#[inline]
pub fn backward(code: u32) -> u8 {
if (code &!0x7f) == 0x80 {
code as u8
} else {
0
}
}
}
#[cfg(test)]
mod tests {
use all::ISO_8859_2;
use types::*;
#[test]
fn test_encoder_non_bmp() {
let mut e = ISO_8859_2.raw_encoder();
assert_feed_err!(e, "A", "\u{FFFF}", "B", [0x41]);
assert_feed_err!(e, "A", "\u{10000}", "B", [0x41]);
}
}
| {
self.whatwg_name
} | identifier_body |
coherence-blanket-conflicts-with-specific-multidispatch.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Show;
use std::default::Default;
// Test that a blank impl for all T conflicts with an impl for some
// specific T, even when there are multiple type parameters involved.
trait MyTrait<T> {
fn get(&self) -> T;
}
impl<T> MyTrait<T> for T { //~ ERROR E0119
fn get(&self) -> T {
fail!()
}
}
#[deriving(Clone)]
struct MyType {
dummy: uint
}
impl MyTrait<MyType> for MyType {
fn get(&self) -> uint { (*self).clone() }
}
fn | () { }
| main | identifier_name |
coherence-blanket-conflicts-with-specific-multidispatch.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // except according to those terms.
use std::fmt::Show;
use std::default::Default;
// Test that a blank impl for all T conflicts with an impl for some
// specific T, even when there are multiple type parameters involved.
trait MyTrait<T> {
fn get(&self) -> T;
}
impl<T> MyTrait<T> for T { //~ ERROR E0119
fn get(&self) -> T {
fail!()
}
}
#[deriving(Clone)]
struct MyType {
dummy: uint
}
impl MyTrait<MyType> for MyType {
fn get(&self) -> uint { (*self).clone() }
}
fn main() { } | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | random_line_split |
predicate.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use core_traits::{
ValueType,
ValueTypeSet,
};
use mentat_core::{
Schema,
};
use edn::query::{
FnArg,
PlainSymbol,
Predicate,
TypeAnnotation,
};
use clauses::ConjoiningClauses;
use clauses::convert::ValueTypes;
use query_algebrizer_traits::errors::{
AlgebrizerError,
Result,
};
use types::{
ColumnConstraint,
EmptyBecause,
Inequality,
QueryValue,
};
use Known;
/// Application of predicates.
impl ConjoiningClauses {
/// There are several kinds of predicates in our Datalog:
/// - A limited set of binary comparison operators: < > <= >=!=.
/// These are converted into SQLite binary comparisons and some type constraints.
/// - In the future, some predicates that are implemented via function calls in SQLite.
///
/// At present we have implemented only the five built-in comparison binary operators.
pub(crate) fn apply_predicate(&mut self, known: Known, predicate: Predicate) -> Result<()> {
// Because we'll be growing the set of built-in predicates, handling each differently,
// and ultimately allowing user-specified predicates, we match on the predicate name first.
if let Some(op) = Inequality::from_datalog_operator(predicate.operator.0.as_str()) {
self.apply_inequality(known, op, predicate)
} else {
bail!(AlgebrizerError::UnknownFunction(predicate.operator.clone()))
}
}
fn potential_types(&self, schema: &Schema, fn_arg: &FnArg) -> Result<ValueTypeSet> {
match fn_arg {
&FnArg::Variable(ref v) => Ok(self.known_type_set(v)),
_ => fn_arg.potential_types(schema),
}
}
/// Apply a type annotation, which is a construct like a predicate that constrains the argument
/// to be a specific ValueType.
pub(crate) fn apply_type_anno(&mut self, anno: &TypeAnnotation) -> Result<()> {
match ValueType::from_keyword(&anno.value_type) {
Some(value_type) => self.add_type_requirement(anno.variable.clone(), ValueTypeSet::of_one(value_type)),
None => bail!(AlgebrizerError::InvalidArgumentType(PlainSymbol::plain("type"), ValueTypeSet::any(), 2)),
}
Ok(())
}
/// This function:
/// - Resolves variables and converts types to those more amenable to SQL.
/// - Ensures that the predicate functions name a known operator.
/// - Accumulates an `Inequality` constraint into the `wheres` list.
pub(crate) fn apply_inequality(&mut self, known: Known, comparison: Inequality, predicate: Predicate) -> Result<()> {
if predicate.args.len()!= 2 {
bail!(AlgebrizerError::InvalidNumberOfArguments(predicate.operator.clone(), predicate.args.len(), 2));
}
// Go from arguments -- parser output -- to columns or values.
// Any variables that aren't bound by this point in the linear processing of clauses will
// cause the application of the predicate to fail.
let mut args = predicate.args.into_iter();
let left = args.next().expect("two args");
let right = args.next().expect("two args");
// The types we're handling here must be the intersection of the possible types of the arguments,
// the known types of any variables, and the types supported by our inequality operators.
let supported_types = comparison.supported_types();
let mut left_types = self.potential_types(known.schema, &left)?
.intersection(&supported_types);
if left_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
let mut right_types = self.potential_types(known.schema, &right)?
.intersection(&supported_types);
if right_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 1));
}
// We would like to allow longs to compare to doubles.
// Do this by expanding the type sets. `resolve_numeric_argument` will
// use `Long` by preference.
if right_types.contains(ValueType::Long) {
right_types.insert(ValueType::Double);
}
if left_types.contains(ValueType::Long) {
left_types.insert(ValueType::Double);
}
let shared_types = left_types.intersection(&right_types);
if shared_types.is_empty() {
// In isolation these are both valid inputs to the operator, but the query cannot
// succeed because the types don't match.
self.mark_known_empty(
if let Some(var) = left.as_variable().or_else(|| right.as_variable()) {
EmptyBecause::TypeMismatch {
var: var.clone(),
existing: left_types,
desired: right_types,
}
} else {
EmptyBecause::KnownTypeMismatch {
left: left_types,
right: right_types,
}
});
return Ok(());
}
// We expect the intersection to be Long, Long+Double, Double, or Instant.
let left_v;
let right_v;
if shared_types == ValueTypeSet::of_one(ValueType::Instant) {
left_v = self.resolve_instant_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_instant_argument(&predicate.operator, 1, right)?;
} else if shared_types.is_only_numeric() {
left_v = self.resolve_numeric_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_numeric_argument(&predicate.operator, 1, right)?;
} else if shared_types == ValueTypeSet::of_one(ValueType::Ref) {
left_v = self.resolve_ref_argument(known.schema, &predicate.operator, 0, left)?;
right_v = self.resolve_ref_argument(known.schema, &predicate.operator, 1, right)?;
} else {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
// These arguments must be variables or instant/numeric constants.
// TODO: static evaluation. #383.
let constraint = comparison.to_constraint(left_v, right_v);
self.wheres.add_intersection(constraint);
Ok(())
}
}
impl Inequality {
fn to_constraint(&self, left: QueryValue, right: QueryValue) -> ColumnConstraint {
match *self {
Inequality::TxAfter |
Inequality::TxBefore => {
// TODO: both ends of the range must be inside the tx partition!
// If we know the partition map -- and at this point we do, it's just
// not passed to this function -- then we can generate two constraints,
// or clamp a fixed value.
},
_ => {
},
}
ColumnConstraint::Inequality {
operator: *self,
left: left,
right: right,
}
}
}
#[cfg(test)]
mod testing {
use super::*;
use core_traits::attribute::{
Unique,
};
use core_traits::{
Attribute,
TypedValue,
ValueType,
};
use edn::query::{
FnArg,
Keyword,
Pattern,
PatternNonValuePlace,
PatternValuePlace,
PlainSymbol,
Variable,
};
use clauses::{
add_attribute,
associate_ident,
ident,
};
use types::{
ColumnConstraint,
EmptyBecause,
QueryValue,
};
#[test]
/// Apply two patterns: a pattern and a numeric predicate.
/// Verify that after application of the predicate we know that the value
/// must be numeric.
fn test_apply_inequality() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain("<");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(!cc.is_known_empty());
// After processing those two clauses, we know that?y must be numeric, but not exactly
// which type it must be.
assert_eq!(None, cc.known_type(&y)); // Not just one.
let expected = ValueTypeSet::of_numeric_types();
assert_eq!(Some(&expected), cc.known_types.get(&y));
let clauses = cc.wheres;
assert_eq!(clauses.len(), 1);
assert_eq!(clauses.0[0], ColumnConstraint::Inequality {
operator: Inequality::LessThan,
left: QueryValue::Column(cc.column_bindings.get(&y).unwrap()[0].clone()),
right: QueryValue::TypedValue(TypedValue::Long(10)),
}.into());
}
#[test]
/// Apply three patterns: an unbound pattern to establish a value var,
/// a predicate to constrain the val to numeric types, and a third pattern to conflict with the
/// numeric types and cause the pattern to fail.
fn | () {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
associate_ident(&mut schema, Keyword::namespaced("foo", "roz"), 98);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
add_attribute(&mut schema, 98, Attribute {
value_type: ValueType::String,
unique: Some(Unique::Identity),
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain(">=");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: ident("foo", "roz"),
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(cc.is_known_empty());
assert_eq!(cc.empty_because.unwrap(),
EmptyBecause::TypeMismatch {
var: y.clone(),
existing: ValueTypeSet::of_numeric_types(),
desired: ValueTypeSet::of_one(ValueType::String),
});
}
}
| test_apply_conflict_with_numeric_range | identifier_name |
predicate.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use core_traits::{
ValueType,
ValueTypeSet,
};
use mentat_core::{
Schema,
};
use edn::query::{
FnArg,
PlainSymbol,
Predicate,
TypeAnnotation,
};
use clauses::ConjoiningClauses;
use clauses::convert::ValueTypes;
use query_algebrizer_traits::errors::{
AlgebrizerError,
Result,
};
use types::{
ColumnConstraint,
EmptyBecause,
Inequality,
QueryValue,
};
use Known;
/// Application of predicates.
impl ConjoiningClauses {
/// There are several kinds of predicates in our Datalog:
/// - A limited set of binary comparison operators: < > <= >=!=.
/// These are converted into SQLite binary comparisons and some type constraints.
/// - In the future, some predicates that are implemented via function calls in SQLite.
///
/// At present we have implemented only the five built-in comparison binary operators.
pub(crate) fn apply_predicate(&mut self, known: Known, predicate: Predicate) -> Result<()> {
// Because we'll be growing the set of built-in predicates, handling each differently,
// and ultimately allowing user-specified predicates, we match on the predicate name first.
if let Some(op) = Inequality::from_datalog_operator(predicate.operator.0.as_str()) {
self.apply_inequality(known, op, predicate)
} else {
bail!(AlgebrizerError::UnknownFunction(predicate.operator.clone()))
}
}
fn potential_types(&self, schema: &Schema, fn_arg: &FnArg) -> Result<ValueTypeSet> {
match fn_arg {
&FnArg::Variable(ref v) => Ok(self.known_type_set(v)),
_ => fn_arg.potential_types(schema),
}
}
/// Apply a type annotation, which is a construct like a predicate that constrains the argument
/// to be a specific ValueType.
pub(crate) fn apply_type_anno(&mut self, anno: &TypeAnnotation) -> Result<()> {
match ValueType::from_keyword(&anno.value_type) {
Some(value_type) => self.add_type_requirement(anno.variable.clone(), ValueTypeSet::of_one(value_type)),
None => bail!(AlgebrizerError::InvalidArgumentType(PlainSymbol::plain("type"), ValueTypeSet::any(), 2)),
}
Ok(())
}
/// This function:
/// - Resolves variables and converts types to those more amenable to SQL. | }
// Go from arguments -- parser output -- to columns or values.
// Any variables that aren't bound by this point in the linear processing of clauses will
// cause the application of the predicate to fail.
let mut args = predicate.args.into_iter();
let left = args.next().expect("two args");
let right = args.next().expect("two args");
// The types we're handling here must be the intersection of the possible types of the arguments,
// the known types of any variables, and the types supported by our inequality operators.
let supported_types = comparison.supported_types();
let mut left_types = self.potential_types(known.schema, &left)?
.intersection(&supported_types);
if left_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
let mut right_types = self.potential_types(known.schema, &right)?
.intersection(&supported_types);
if right_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 1));
}
// We would like to allow longs to compare to doubles.
// Do this by expanding the type sets. `resolve_numeric_argument` will
// use `Long` by preference.
if right_types.contains(ValueType::Long) {
right_types.insert(ValueType::Double);
}
if left_types.contains(ValueType::Long) {
left_types.insert(ValueType::Double);
}
let shared_types = left_types.intersection(&right_types);
if shared_types.is_empty() {
// In isolation these are both valid inputs to the operator, but the query cannot
// succeed because the types don't match.
self.mark_known_empty(
if let Some(var) = left.as_variable().or_else(|| right.as_variable()) {
EmptyBecause::TypeMismatch {
var: var.clone(),
existing: left_types,
desired: right_types,
}
} else {
EmptyBecause::KnownTypeMismatch {
left: left_types,
right: right_types,
}
});
return Ok(());
}
// We expect the intersection to be Long, Long+Double, Double, or Instant.
let left_v;
let right_v;
if shared_types == ValueTypeSet::of_one(ValueType::Instant) {
left_v = self.resolve_instant_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_instant_argument(&predicate.operator, 1, right)?;
} else if shared_types.is_only_numeric() {
left_v = self.resolve_numeric_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_numeric_argument(&predicate.operator, 1, right)?;
} else if shared_types == ValueTypeSet::of_one(ValueType::Ref) {
left_v = self.resolve_ref_argument(known.schema, &predicate.operator, 0, left)?;
right_v = self.resolve_ref_argument(known.schema, &predicate.operator, 1, right)?;
} else {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
// These arguments must be variables or instant/numeric constants.
// TODO: static evaluation. #383.
let constraint = comparison.to_constraint(left_v, right_v);
self.wheres.add_intersection(constraint);
Ok(())
}
}
impl Inequality {
fn to_constraint(&self, left: QueryValue, right: QueryValue) -> ColumnConstraint {
match *self {
Inequality::TxAfter |
Inequality::TxBefore => {
// TODO: both ends of the range must be inside the tx partition!
// If we know the partition map -- and at this point we do, it's just
// not passed to this function -- then we can generate two constraints,
// or clamp a fixed value.
},
_ => {
},
}
ColumnConstraint::Inequality {
operator: *self,
left: left,
right: right,
}
}
}
#[cfg(test)]
mod testing {
use super::*;
use core_traits::attribute::{
Unique,
};
use core_traits::{
Attribute,
TypedValue,
ValueType,
};
use edn::query::{
FnArg,
Keyword,
Pattern,
PatternNonValuePlace,
PatternValuePlace,
PlainSymbol,
Variable,
};
use clauses::{
add_attribute,
associate_ident,
ident,
};
use types::{
ColumnConstraint,
EmptyBecause,
QueryValue,
};
#[test]
/// Apply two patterns: a pattern and a numeric predicate.
/// Verify that after application of the predicate we know that the value
/// must be numeric.
fn test_apply_inequality() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain("<");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(!cc.is_known_empty());
// After processing those two clauses, we know that?y must be numeric, but not exactly
// which type it must be.
assert_eq!(None, cc.known_type(&y)); // Not just one.
let expected = ValueTypeSet::of_numeric_types();
assert_eq!(Some(&expected), cc.known_types.get(&y));
let clauses = cc.wheres;
assert_eq!(clauses.len(), 1);
assert_eq!(clauses.0[0], ColumnConstraint::Inequality {
operator: Inequality::LessThan,
left: QueryValue::Column(cc.column_bindings.get(&y).unwrap()[0].clone()),
right: QueryValue::TypedValue(TypedValue::Long(10)),
}.into());
}
#[test]
/// Apply three patterns: an unbound pattern to establish a value var,
/// a predicate to constrain the val to numeric types, and a third pattern to conflict with the
/// numeric types and cause the pattern to fail.
fn test_apply_conflict_with_numeric_range() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
associate_ident(&mut schema, Keyword::namespaced("foo", "roz"), 98);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
add_attribute(&mut schema, 98, Attribute {
value_type: ValueType::String,
unique: Some(Unique::Identity),
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain(">=");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: ident("foo", "roz"),
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(cc.is_known_empty());
assert_eq!(cc.empty_because.unwrap(),
EmptyBecause::TypeMismatch {
var: y.clone(),
existing: ValueTypeSet::of_numeric_types(),
desired: ValueTypeSet::of_one(ValueType::String),
});
}
} | /// - Ensures that the predicate functions name a known operator.
/// - Accumulates an `Inequality` constraint into the `wheres` list.
pub(crate) fn apply_inequality(&mut self, known: Known, comparison: Inequality, predicate: Predicate) -> Result<()> {
if predicate.args.len() != 2 {
bail!(AlgebrizerError::InvalidNumberOfArguments(predicate.operator.clone(), predicate.args.len(), 2)); | random_line_split |
predicate.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use core_traits::{
ValueType,
ValueTypeSet,
};
use mentat_core::{
Schema,
};
use edn::query::{
FnArg,
PlainSymbol,
Predicate,
TypeAnnotation,
};
use clauses::ConjoiningClauses;
use clauses::convert::ValueTypes;
use query_algebrizer_traits::errors::{
AlgebrizerError,
Result,
};
use types::{
ColumnConstraint,
EmptyBecause,
Inequality,
QueryValue,
};
use Known;
/// Application of predicates.
impl ConjoiningClauses {
/// There are several kinds of predicates in our Datalog:
/// - A limited set of binary comparison operators: < > <= >=!=.
/// These are converted into SQLite binary comparisons and some type constraints.
/// - In the future, some predicates that are implemented via function calls in SQLite.
///
/// At present we have implemented only the five built-in comparison binary operators.
pub(crate) fn apply_predicate(&mut self, known: Known, predicate: Predicate) -> Result<()> {
// Because we'll be growing the set of built-in predicates, handling each differently,
// and ultimately allowing user-specified predicates, we match on the predicate name first.
if let Some(op) = Inequality::from_datalog_operator(predicate.operator.0.as_str()) {
self.apply_inequality(known, op, predicate)
} else {
bail!(AlgebrizerError::UnknownFunction(predicate.operator.clone()))
}
}
fn potential_types(&self, schema: &Schema, fn_arg: &FnArg) -> Result<ValueTypeSet> {
match fn_arg {
&FnArg::Variable(ref v) => Ok(self.known_type_set(v)),
_ => fn_arg.potential_types(schema),
}
}
/// Apply a type annotation, which is a construct like a predicate that constrains the argument
/// to be a specific ValueType.
pub(crate) fn apply_type_anno(&mut self, anno: &TypeAnnotation) -> Result<()> |
/// This function:
/// - Resolves variables and converts types to those more amenable to SQL.
/// - Ensures that the predicate functions name a known operator.
/// - Accumulates an `Inequality` constraint into the `wheres` list.
pub(crate) fn apply_inequality(&mut self, known: Known, comparison: Inequality, predicate: Predicate) -> Result<()> {
if predicate.args.len()!= 2 {
bail!(AlgebrizerError::InvalidNumberOfArguments(predicate.operator.clone(), predicate.args.len(), 2));
}
// Go from arguments -- parser output -- to columns or values.
// Any variables that aren't bound by this point in the linear processing of clauses will
// cause the application of the predicate to fail.
let mut args = predicate.args.into_iter();
let left = args.next().expect("two args");
let right = args.next().expect("two args");
// The types we're handling here must be the intersection of the possible types of the arguments,
// the known types of any variables, and the types supported by our inequality operators.
let supported_types = comparison.supported_types();
let mut left_types = self.potential_types(known.schema, &left)?
.intersection(&supported_types);
if left_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
let mut right_types = self.potential_types(known.schema, &right)?
.intersection(&supported_types);
if right_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 1));
}
// We would like to allow longs to compare to doubles.
// Do this by expanding the type sets. `resolve_numeric_argument` will
// use `Long` by preference.
if right_types.contains(ValueType::Long) {
right_types.insert(ValueType::Double);
}
if left_types.contains(ValueType::Long) {
left_types.insert(ValueType::Double);
}
let shared_types = left_types.intersection(&right_types);
if shared_types.is_empty() {
// In isolation these are both valid inputs to the operator, but the query cannot
// succeed because the types don't match.
self.mark_known_empty(
if let Some(var) = left.as_variable().or_else(|| right.as_variable()) {
EmptyBecause::TypeMismatch {
var: var.clone(),
existing: left_types,
desired: right_types,
}
} else {
EmptyBecause::KnownTypeMismatch {
left: left_types,
right: right_types,
}
});
return Ok(());
}
// We expect the intersection to be Long, Long+Double, Double, or Instant.
let left_v;
let right_v;
if shared_types == ValueTypeSet::of_one(ValueType::Instant) {
left_v = self.resolve_instant_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_instant_argument(&predicate.operator, 1, right)?;
} else if shared_types.is_only_numeric() {
left_v = self.resolve_numeric_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_numeric_argument(&predicate.operator, 1, right)?;
} else if shared_types == ValueTypeSet::of_one(ValueType::Ref) {
left_v = self.resolve_ref_argument(known.schema, &predicate.operator, 0, left)?;
right_v = self.resolve_ref_argument(known.schema, &predicate.operator, 1, right)?;
} else {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
// These arguments must be variables or instant/numeric constants.
// TODO: static evaluation. #383.
let constraint = comparison.to_constraint(left_v, right_v);
self.wheres.add_intersection(constraint);
Ok(())
}
}
impl Inequality {
fn to_constraint(&self, left: QueryValue, right: QueryValue) -> ColumnConstraint {
match *self {
Inequality::TxAfter |
Inequality::TxBefore => {
// TODO: both ends of the range must be inside the tx partition!
// If we know the partition map -- and at this point we do, it's just
// not passed to this function -- then we can generate two constraints,
// or clamp a fixed value.
},
_ => {
},
}
ColumnConstraint::Inequality {
operator: *self,
left: left,
right: right,
}
}
}
#[cfg(test)]
mod testing {
use super::*;
use core_traits::attribute::{
Unique,
};
use core_traits::{
Attribute,
TypedValue,
ValueType,
};
use edn::query::{
FnArg,
Keyword,
Pattern,
PatternNonValuePlace,
PatternValuePlace,
PlainSymbol,
Variable,
};
use clauses::{
add_attribute,
associate_ident,
ident,
};
use types::{
ColumnConstraint,
EmptyBecause,
QueryValue,
};
#[test]
/// Apply two patterns: a pattern and a numeric predicate.
/// Verify that after application of the predicate we know that the value
/// must be numeric.
fn test_apply_inequality() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain("<");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(!cc.is_known_empty());
// After processing those two clauses, we know that?y must be numeric, but not exactly
// which type it must be.
assert_eq!(None, cc.known_type(&y)); // Not just one.
let expected = ValueTypeSet::of_numeric_types();
assert_eq!(Some(&expected), cc.known_types.get(&y));
let clauses = cc.wheres;
assert_eq!(clauses.len(), 1);
assert_eq!(clauses.0[0], ColumnConstraint::Inequality {
operator: Inequality::LessThan,
left: QueryValue::Column(cc.column_bindings.get(&y).unwrap()[0].clone()),
right: QueryValue::TypedValue(TypedValue::Long(10)),
}.into());
}
#[test]
/// Apply three patterns: an unbound pattern to establish a value var,
/// a predicate to constrain the val to numeric types, and a third pattern to conflict with the
/// numeric types and cause the pattern to fail.
fn test_apply_conflict_with_numeric_range() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
associate_ident(&mut schema, Keyword::namespaced("foo", "roz"), 98);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
add_attribute(&mut schema, 98, Attribute {
value_type: ValueType::String,
unique: Some(Unique::Identity),
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain(">=");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: ident("foo", "roz"),
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(cc.is_known_empty());
assert_eq!(cc.empty_because.unwrap(),
EmptyBecause::TypeMismatch {
var: y.clone(),
existing: ValueTypeSet::of_numeric_types(),
desired: ValueTypeSet::of_one(ValueType::String),
});
}
}
| {
match ValueType::from_keyword(&anno.value_type) {
Some(value_type) => self.add_type_requirement(anno.variable.clone(), ValueTypeSet::of_one(value_type)),
None => bail!(AlgebrizerError::InvalidArgumentType(PlainSymbol::plain("type"), ValueTypeSet::any(), 2)),
}
Ok(())
} | identifier_body |
predicate.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use core_traits::{
ValueType,
ValueTypeSet,
};
use mentat_core::{
Schema,
};
use edn::query::{
FnArg,
PlainSymbol,
Predicate,
TypeAnnotation,
};
use clauses::ConjoiningClauses;
use clauses::convert::ValueTypes;
use query_algebrizer_traits::errors::{
AlgebrizerError,
Result,
};
use types::{
ColumnConstraint,
EmptyBecause,
Inequality,
QueryValue,
};
use Known;
/// Application of predicates.
impl ConjoiningClauses {
/// There are several kinds of predicates in our Datalog:
/// - A limited set of binary comparison operators: < > <= >=!=.
/// These are converted into SQLite binary comparisons and some type constraints.
/// - In the future, some predicates that are implemented via function calls in SQLite.
///
/// At present we have implemented only the five built-in comparison binary operators.
pub(crate) fn apply_predicate(&mut self, known: Known, predicate: Predicate) -> Result<()> {
// Because we'll be growing the set of built-in predicates, handling each differently,
// and ultimately allowing user-specified predicates, we match on the predicate name first.
if let Some(op) = Inequality::from_datalog_operator(predicate.operator.0.as_str()) {
self.apply_inequality(known, op, predicate)
} else {
bail!(AlgebrizerError::UnknownFunction(predicate.operator.clone()))
}
}
fn potential_types(&self, schema: &Schema, fn_arg: &FnArg) -> Result<ValueTypeSet> {
match fn_arg {
&FnArg::Variable(ref v) => Ok(self.known_type_set(v)),
_ => fn_arg.potential_types(schema),
}
}
/// Apply a type annotation, which is a construct like a predicate that constrains the argument
/// to be a specific ValueType.
pub(crate) fn apply_type_anno(&mut self, anno: &TypeAnnotation) -> Result<()> {
match ValueType::from_keyword(&anno.value_type) {
Some(value_type) => self.add_type_requirement(anno.variable.clone(), ValueTypeSet::of_one(value_type)),
None => bail!(AlgebrizerError::InvalidArgumentType(PlainSymbol::plain("type"), ValueTypeSet::any(), 2)),
}
Ok(())
}
/// This function:
/// - Resolves variables and converts types to those more amenable to SQL.
/// - Ensures that the predicate functions name a known operator.
/// - Accumulates an `Inequality` constraint into the `wheres` list.
pub(crate) fn apply_inequality(&mut self, known: Known, comparison: Inequality, predicate: Predicate) -> Result<()> {
if predicate.args.len()!= 2 {
bail!(AlgebrizerError::InvalidNumberOfArguments(predicate.operator.clone(), predicate.args.len(), 2));
}
// Go from arguments -- parser output -- to columns or values.
// Any variables that aren't bound by this point in the linear processing of clauses will
// cause the application of the predicate to fail.
let mut args = predicate.args.into_iter();
let left = args.next().expect("two args");
let right = args.next().expect("two args");
// The types we're handling here must be the intersection of the possible types of the arguments,
// the known types of any variables, and the types supported by our inequality operators.
let supported_types = comparison.supported_types();
let mut left_types = self.potential_types(known.schema, &left)?
.intersection(&supported_types);
if left_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
let mut right_types = self.potential_types(known.schema, &right)?
.intersection(&supported_types);
if right_types.is_empty() {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 1));
}
// We would like to allow longs to compare to doubles.
// Do this by expanding the type sets. `resolve_numeric_argument` will
// use `Long` by preference.
if right_types.contains(ValueType::Long) {
right_types.insert(ValueType::Double);
}
if left_types.contains(ValueType::Long) {
left_types.insert(ValueType::Double);
}
let shared_types = left_types.intersection(&right_types);
if shared_types.is_empty() {
// In isolation these are both valid inputs to the operator, but the query cannot
// succeed because the types don't match.
self.mark_known_empty(
if let Some(var) = left.as_variable().or_else(|| right.as_variable()) {
EmptyBecause::TypeMismatch {
var: var.clone(),
existing: left_types,
desired: right_types,
}
} else {
EmptyBecause::KnownTypeMismatch {
left: left_types,
right: right_types,
}
});
return Ok(());
}
// We expect the intersection to be Long, Long+Double, Double, or Instant.
let left_v;
let right_v;
if shared_types == ValueTypeSet::of_one(ValueType::Instant) {
left_v = self.resolve_instant_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_instant_argument(&predicate.operator, 1, right)?;
} else if shared_types.is_only_numeric() {
left_v = self.resolve_numeric_argument(&predicate.operator, 0, left)?;
right_v = self.resolve_numeric_argument(&predicate.operator, 1, right)?;
} else if shared_types == ValueTypeSet::of_one(ValueType::Ref) | else {
bail!(AlgebrizerError::InvalidArgumentType(predicate.operator.clone(), supported_types, 0));
}
// These arguments must be variables or instant/numeric constants.
// TODO: static evaluation. #383.
let constraint = comparison.to_constraint(left_v, right_v);
self.wheres.add_intersection(constraint);
Ok(())
}
}
impl Inequality {
fn to_constraint(&self, left: QueryValue, right: QueryValue) -> ColumnConstraint {
match *self {
Inequality::TxAfter |
Inequality::TxBefore => {
// TODO: both ends of the range must be inside the tx partition!
// If we know the partition map -- and at this point we do, it's just
// not passed to this function -- then we can generate two constraints,
// or clamp a fixed value.
},
_ => {
},
}
ColumnConstraint::Inequality {
operator: *self,
left: left,
right: right,
}
}
}
#[cfg(test)]
mod testing {
use super::*;
use core_traits::attribute::{
Unique,
};
use core_traits::{
Attribute,
TypedValue,
ValueType,
};
use edn::query::{
FnArg,
Keyword,
Pattern,
PatternNonValuePlace,
PatternValuePlace,
PlainSymbol,
Variable,
};
use clauses::{
add_attribute,
associate_ident,
ident,
};
use types::{
ColumnConstraint,
EmptyBecause,
QueryValue,
};
#[test]
/// Apply two patterns: a pattern and a numeric predicate.
/// Verify that after application of the predicate we know that the value
/// must be numeric.
fn test_apply_inequality() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain("<");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(!cc.is_known_empty());
// After processing those two clauses, we know that?y must be numeric, but not exactly
// which type it must be.
assert_eq!(None, cc.known_type(&y)); // Not just one.
let expected = ValueTypeSet::of_numeric_types();
assert_eq!(Some(&expected), cc.known_types.get(&y));
let clauses = cc.wheres;
assert_eq!(clauses.len(), 1);
assert_eq!(clauses.0[0], ColumnConstraint::Inequality {
operator: Inequality::LessThan,
left: QueryValue::Column(cc.column_bindings.get(&y).unwrap()[0].clone()),
right: QueryValue::TypedValue(TypedValue::Long(10)),
}.into());
}
#[test]
/// Apply three patterns: an unbound pattern to establish a value var,
/// a predicate to constrain the val to numeric types, and a third pattern to conflict with the
/// numeric types and cause the pattern to fail.
fn test_apply_conflict_with_numeric_range() {
let mut cc = ConjoiningClauses::default();
let mut schema = Schema::default();
associate_ident(&mut schema, Keyword::namespaced("foo", "bar"), 99);
associate_ident(&mut schema, Keyword::namespaced("foo", "roz"), 98);
add_attribute(&mut schema, 99, Attribute {
value_type: ValueType::Long,
..Default::default()
});
add_attribute(&mut schema, 98, Attribute {
value_type: ValueType::String,
unique: Some(Unique::Identity),
..Default::default()
});
let x = Variable::from_valid_name("?x");
let y = Variable::from_valid_name("?y");
let known = Known::for_schema(&schema);
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: PatternNonValuePlace::Placeholder,
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
assert!(!cc.is_known_empty());
let op = PlainSymbol::plain(">=");
let comp = Inequality::from_datalog_operator(op.name()).unwrap();
assert!(cc.apply_inequality(known, comp, Predicate {
operator: op,
args: vec![
FnArg::Variable(Variable::from_valid_name("?y")), FnArg::EntidOrInteger(10),
]}).is_ok());
assert!(!cc.is_known_empty());
cc.apply_parsed_pattern(known, Pattern {
source: None,
entity: PatternNonValuePlace::Variable(x.clone()),
attribute: ident("foo", "roz"),
value: PatternValuePlace::Variable(y.clone()),
tx: PatternNonValuePlace::Placeholder,
});
// Finally, expand column bindings to get the overlaps for?x.
cc.expand_column_bindings();
assert!(cc.is_known_empty());
assert_eq!(cc.empty_because.unwrap(),
EmptyBecause::TypeMismatch {
var: y.clone(),
existing: ValueTypeSet::of_numeric_types(),
desired: ValueTypeSet::of_one(ValueType::String),
});
}
}
| {
left_v = self.resolve_ref_argument(known.schema, &predicate.operator, 0, left)?;
right_v = self.resolve_ref_argument(known.schema, &predicate.operator, 1, right)?;
} | conditional_block |
message.rs | use ascii_canvas::AsciiView;
use grammar::parse_tree::Span;
use message::Content;
use std::cmp;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
use tls::Tls;
/// The top-level message display like this:
///
/// ```
/// <span>: <heading>
///
/// <body>
/// ```
///
/// This is equivalent to a
///
/// ```
/// Vert[separate=2] {
/// Horiz[separate=1] {
/// Horiz[separate=0] {
/// Citation { span },
/// Text { ":" },
/// },
/// <heading>,
/// },
/// <body>
/// }
/// ```
pub struct Message {
span: Span,
heading: Box<Content>,
body: Box<Content>,
}
| span: span,
heading: heading,
body: body,
}
}
}
impl Content for Message {
fn min_width(&self) -> usize {
let file_text = Tls::file_text();
let span = file_text.span_str(self.span).chars().count();
let heading = self.heading.min_width();
let body = self.body.min_width();
cmp::max(span + heading + 2, body + 2)
}
fn emit(&self, view: &mut AsciiView) {
let session = Tls::session();
let file_text = Tls::file_text();
let span = file_text.span_str(self.span);
view.write_chars(0, 0, span.chars(), Style::new());
let count = span.chars().count();
view.write_chars(0, count, ":".chars(), Style::new());
let (row, _) = self.heading
.emit_at(&mut view.styled(session.heading), 0, count + 2);
self.body.emit_at(view, row + 2, 2);
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
wrap_items.push(self);
}
}
impl Debug for Message {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Message")
.field("span", &self.span)
.field("heading", &self.heading)
.field("body", &self.body)
.finish()
}
} | impl Message {
pub fn new(span: Span, heading: Box<Content>, body: Box<Content>) -> Self {
Message { | random_line_split |
message.rs | use ascii_canvas::AsciiView;
use grammar::parse_tree::Span;
use message::Content;
use std::cmp;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
use tls::Tls;
/// The top-level message display like this:
///
/// ```
/// <span>: <heading>
///
/// <body>
/// ```
///
/// This is equivalent to a
///
/// ```
/// Vert[separate=2] {
/// Horiz[separate=1] {
/// Horiz[separate=0] {
/// Citation { span },
/// Text { ":" },
/// },
/// <heading>,
/// },
/// <body>
/// }
/// ```
pub struct Message {
span: Span,
heading: Box<Content>,
body: Box<Content>,
}
impl Message {
pub fn new(span: Span, heading: Box<Content>, body: Box<Content>) -> Self {
Message {
span: span,
heading: heading,
body: body,
}
}
}
impl Content for Message {
fn min_width(&self) -> usize {
let file_text = Tls::file_text();
let span = file_text.span_str(self.span).chars().count();
let heading = self.heading.min_width();
let body = self.body.min_width();
cmp::max(span + heading + 2, body + 2)
}
fn emit(&self, view: &mut AsciiView) {
let session = Tls::session();
let file_text = Tls::file_text();
let span = file_text.span_str(self.span);
view.write_chars(0, 0, span.chars(), Style::new());
let count = span.chars().count();
view.write_chars(0, count, ":".chars(), Style::new());
let (row, _) = self.heading
.emit_at(&mut view.styled(session.heading), 0, count + 2);
self.body.emit_at(view, row + 2, 2);
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
wrap_items.push(self);
}
}
impl Debug for Message {
fn | (&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Message")
.field("span", &self.span)
.field("heading", &self.heading)
.field("body", &self.body)
.finish()
}
}
| fmt | identifier_name |
message.rs | use ascii_canvas::AsciiView;
use grammar::parse_tree::Span;
use message::Content;
use std::cmp;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
use tls::Tls;
/// The top-level message display like this:
///
/// ```
/// <span>: <heading>
///
/// <body>
/// ```
///
/// This is equivalent to a
///
/// ```
/// Vert[separate=2] {
/// Horiz[separate=1] {
/// Horiz[separate=0] {
/// Citation { span },
/// Text { ":" },
/// },
/// <heading>,
/// },
/// <body>
/// }
/// ```
pub struct Message {
span: Span,
heading: Box<Content>,
body: Box<Content>,
}
impl Message {
pub fn new(span: Span, heading: Box<Content>, body: Box<Content>) -> Self {
Message {
span: span,
heading: heading,
body: body,
}
}
}
impl Content for Message {
fn min_width(&self) -> usize {
let file_text = Tls::file_text();
let span = file_text.span_str(self.span).chars().count();
let heading = self.heading.min_width();
let body = self.body.min_width();
cmp::max(span + heading + 2, body + 2)
}
fn emit(&self, view: &mut AsciiView) |
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
wrap_items.push(self);
}
}
impl Debug for Message {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Message")
.field("span", &self.span)
.field("heading", &self.heading)
.field("body", &self.body)
.finish()
}
}
| {
let session = Tls::session();
let file_text = Tls::file_text();
let span = file_text.span_str(self.span);
view.write_chars(0, 0, span.chars(), Style::new());
let count = span.chars().count();
view.write_chars(0, count, ":".chars(), Style::new());
let (row, _) = self.heading
.emit_at(&mut view.styled(session.heading), 0, count + 2);
self.body.emit_at(view, row + 2, 2);
} | identifier_body |
transfer.rs | use params::{List, Metadata, Timestamp};
use resources::Currency;
/// The resource representing a Stripe transfer reversal.
///
/// For more details see https://stripe.com/docs/api#transfer_reversal_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct TransferReversal {
pub id: String,
pub object: String,
pub amount: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub metadata: Metadata,
pub transfer: String,
}
| /// For more details see https://stripe.com/docs/api#transfer_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct Transfer {
pub id: String,
pub object: String,
pub amount: u64,
pub amount_reversed: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub description: Option<String>,
pub destination: String,
pub destination_payment: String,
pub livemode: bool,
pub metadata: Metadata,
pub reversals: List<TransferReversal>,
pub reversed: bool,
pub source_transaction: String,
pub source_type: String,
pub transfer_group: String,
} | /// The resource representing a Stripe transfer.
/// | random_line_split |
transfer.rs | use params::{List, Metadata, Timestamp};
use resources::Currency;
/// The resource representing a Stripe transfer reversal.
///
/// For more details see https://stripe.com/docs/api#transfer_reversal_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct TransferReversal {
pub id: String,
pub object: String,
pub amount: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub metadata: Metadata,
pub transfer: String,
}
/// The resource representing a Stripe transfer.
///
/// For more details see https://stripe.com/docs/api#transfer_object.
#[derive(Debug, Deserialize, Serialize)]
pub struct | {
pub id: String,
pub object: String,
pub amount: u64,
pub amount_reversed: u64,
pub balance_transaction: String,
pub created: Timestamp,
pub currency: Currency,
pub description: Option<String>,
pub destination: String,
pub destination_payment: String,
pub livemode: bool,
pub metadata: Metadata,
pub reversals: List<TransferReversal>,
pub reversed: bool,
pub source_transaction: String,
pub source_type: String,
pub transfer_group: String,
}
| Transfer | identifier_name |
negative_iterator.rs | use std::iter::*;
pub trait NegativeIterator {
type Item;
type IntoIter : Iterator;
fn neg_iter(&self) -> Self::IntoIter;
}
impl<T> NegativeIterator for Option<T> {
type Item = ();
type IntoIter = <Option<()> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter {
match *self {
Some(_) => None,
None => Some(())
}.into_iter()
}
}
impl<'a, T, E> NegativeIterator for &'a Result<T, E> {
type Item = &'a E;
type IntoIter = <Option<&'a E> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter {
match **self {
Ok(_) => None,
Err(ref err) => Some(err)
}.into_iter()
}
}
impl<'a, T, I, II> NegativeIterator for &'a T
where
II: Iterator,
T: NegativeIterator<Item=I, IntoIter=II>,
{
type Item = I;
type IntoIter = II;
fn neg_iter(&self) -> Self::IntoIter {
NegativeIterator::neg_iter(*self)
}
}
// TODO impl for Vec<_> that checks if len() == 0
// -> Maybe tell people to use negative conditional instead {{^vec?}}...
// TODO impl for [T]
#[cfg(test)]
mod test {
use super::NegativeIterator;
#[test]
fn option_some() {
for _ in Some(5).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn option_none() |
#[test]
fn result_ok() {
let result: Result<i32, i32> = Ok(5);
for _ in (&result).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn result_err() {
let mut iterations = 0;
let result: Result<i32, i32> = Err(5);
for x in (&result).neg_iter() {
iterations += 1;
assert_eq!(&5, x);
}
assert_eq!(1, iterations);
}
}
| {
let mut iterations = 0;
let option: Option<i32> = None;
for _ in option.neg_iter() {
iterations += 1;
}
assert_eq!(1, iterations);
} | identifier_body |
negative_iterator.rs | use std::iter::*;
pub trait NegativeIterator {
type Item;
type IntoIter : Iterator;
fn neg_iter(&self) -> Self::IntoIter;
}
impl<T> NegativeIterator for Option<T> {
type Item = ();
type IntoIter = <Option<()> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter {
match *self {
Some(_) => None,
None => Some(())
}.into_iter()
}
}
impl<'a, T, E> NegativeIterator for &'a Result<T, E> {
type Item = &'a E;
type IntoIter = <Option<&'a E> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter { | }.into_iter()
}
}
impl<'a, T, I, II> NegativeIterator for &'a T
where
II: Iterator,
T: NegativeIterator<Item=I, IntoIter=II>,
{
type Item = I;
type IntoIter = II;
fn neg_iter(&self) -> Self::IntoIter {
NegativeIterator::neg_iter(*self)
}
}
// TODO impl for Vec<_> that checks if len() == 0
// -> Maybe tell people to use negative conditional instead {{^vec?}}...
// TODO impl for [T]
#[cfg(test)]
mod test {
use super::NegativeIterator;
#[test]
fn option_some() {
for _ in Some(5).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn option_none() {
let mut iterations = 0;
let option: Option<i32> = None;
for _ in option.neg_iter() {
iterations += 1;
}
assert_eq!(1, iterations);
}
#[test]
fn result_ok() {
let result: Result<i32, i32> = Ok(5);
for _ in (&result).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn result_err() {
let mut iterations = 0;
let result: Result<i32, i32> = Err(5);
for x in (&result).neg_iter() {
iterations += 1;
assert_eq!(&5, x);
}
assert_eq!(1, iterations);
}
} | match **self {
Ok(_) => None,
Err(ref err) => Some(err) | random_line_split |
negative_iterator.rs | use std::iter::*;
pub trait NegativeIterator {
type Item;
type IntoIter : Iterator;
fn neg_iter(&self) -> Self::IntoIter;
}
impl<T> NegativeIterator for Option<T> {
type Item = ();
type IntoIter = <Option<()> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter {
match *self {
Some(_) => None,
None => Some(())
}.into_iter()
}
}
impl<'a, T, E> NegativeIterator for &'a Result<T, E> {
type Item = &'a E;
type IntoIter = <Option<&'a E> as IntoIterator>::IntoIter;
fn neg_iter(&self) -> Self::IntoIter {
match **self {
Ok(_) => None,
Err(ref err) => Some(err)
}.into_iter()
}
}
impl<'a, T, I, II> NegativeIterator for &'a T
where
II: Iterator,
T: NegativeIterator<Item=I, IntoIter=II>,
{
type Item = I;
type IntoIter = II;
fn neg_iter(&self) -> Self::IntoIter {
NegativeIterator::neg_iter(*self)
}
}
// TODO impl for Vec<_> that checks if len() == 0
// -> Maybe tell people to use negative conditional instead {{^vec?}}...
// TODO impl for [T]
#[cfg(test)]
mod test {
use super::NegativeIterator;
#[test]
fn option_some() {
for _ in Some(5).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn | () {
let mut iterations = 0;
let option: Option<i32> = None;
for _ in option.neg_iter() {
iterations += 1;
}
assert_eq!(1, iterations);
}
#[test]
fn result_ok() {
let result: Result<i32, i32> = Ok(5);
for _ in (&result).neg_iter() {
panic!("Should not iterate");
}
}
#[test]
fn result_err() {
let mut iterations = 0;
let result: Result<i32, i32> = Err(5);
for x in (&result).neg_iter() {
iterations += 1;
assert_eq!(&5, x);
}
assert_eq!(1, iterations);
}
}
| option_none | identifier_name |
mod.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The HTML5 tree builder.
use core::prelude::*;
pub use self::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
pub use self::interface::{NodeOrText, AppendNode, AppendText};
pub use self::interface::{TreeSink, Tracer};
use self::types::*;
use self::actions::TreeBuilderActions;
use self::rules::TreeBuilderStep;
use tokenizer;
use tokenizer::{Doctype, Tag};
use tokenizer::TokenSink;
use util::str::{is_ascii_whitespace, char_run};
use core::default::Default;
use core::mem::replace;
use collections::vec::Vec;
use collections::string::String;
use std::borrow::Cow::Borrowed;
use collections::RingBuf;
#[macro_use] mod tag_sets;
mod interface;
mod data;
mod types;
mod actions;
mod rules;
/// Tree builder options, with an impl for Default.
#[derive(Copy, Clone)]
pub struct TreeBuilderOpts {
/// Report all parse errors described in the spec, at some
/// performance penalty? Default: false
pub exact_errors: bool,
/// Is scripting enabled?
pub scripting_enabled: bool,
/// Is this an `iframe srcdoc` document?
pub iframe_srcdoc: bool,
/// Are we parsing a HTML fragment?
pub fragment: bool,
/// Should we drop the DOCTYPE (if any) from the tree?
pub drop_doctype: bool,
/// The `<svg>`, `<math>`, and `<template>` tags have special
/// parsing rules that are currently unimplemented. By default
/// we `panic!()` if any of these tags is encountered. If this
/// option is enabled, we will instead attempt to parse them
/// using the ordinary HTML parsing rules.
///
/// **Warning**: This may produce extremely incorrect results
/// on some documents!
pub ignore_missing_rules: bool,
}
impl Default for TreeBuilderOpts {
fn default() -> TreeBuilderOpts {
TreeBuilderOpts {
exact_errors: false,
scripting_enabled: true,
iframe_srcdoc: false,
fragment: false,
drop_doctype: false,
ignore_missing_rules: false,
}
}
}
/// The HTML tree builder.
pub struct TreeBuilder<Handle, Sink> {
/// Options controlling the behavior of the tree builder.
opts: TreeBuilderOpts,
/// Consumer of tree modifications.
sink: Sink,
/// Insertion mode.
mode: InsertionMode,
/// Original insertion mode, used by Text and InTableText modes.
orig_mode: Option<InsertionMode>,
/// Stack of template insertion modes.
template_modes: Vec<InsertionMode>,
/// Pending table character tokens.
pending_table_text: Vec<(SplitStatus, String)>,
/// Quirks mode as set by the parser.
/// FIXME: can scripts etc. change this?
quirks_mode: QuirksMode,
/// The document node, which is created by the sink.
doc_handle: Handle,
/// Stack of open elements, most recently added at end.
open_elems: Vec<Handle>,
/// List of active formatting elements.
active_formatting: Vec<FormatEntry<Handle>>,
//§ the-element-pointers
/// Head element pointer.
head_elem: Option<Handle>,
/// Form element pointer.
form_elem: Option<Handle>,
//§ END
/// Next state change for the tokenizer, if any.
next_tokenizer_state: Option<tokenizer::states::State>,
/// Frameset-ok flag.
frameset_ok: bool,
/// Ignore a following U+000A LINE FEED?
ignore_lf: bool,
/// Is foster parenting enabled?
foster_parenting: bool,
// WARNING: If you add new fields that contain Handles, you
// must add them to trace_handles() below to preserve memory
// safety!
//
// FIXME: Auto-generate the trace hooks like Servo does.
}
impl<Handle, Sink> TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
/// Create a new tree builder which sends tree modifications to a particular `TreeSink`.
///
/// The tree builder is also a `TokenSink`.
pub fn new(mut sink: Sink, opts: TreeBuilderOpts) -> TreeBuilder<Handle, Sink> {
let doc_handle = sink.get_document();
TreeBuilder {
opts: opts,
sink: sink,
mode: Initial,
orig_mode: None,
template_modes: vec!(),
pending_table_text: vec!(),
quirks_mode: NoQuirks,
doc_handle: doc_handle,
open_elems: vec!(),
active_formatting: vec!(),
head_elem: None,
form_elem: None,
next_tokenizer_state: None,
frameset_ok: true,
ignore_lf: false,
foster_parenting: false,
}
}
pub fn unwrap(self) -> Sink {
self.sink
}
pub fn sink<'a>(&'a self) -> &'a Sink {
&self.sink
}
pub fn si | a>(&'a mut self) -> &'a mut Sink {
&mut self.sink
}
/// Call the `Tracer`'s `trace_handle` method on every `Handle` in the tree builder's
/// internal state. This is intended to support garbage-collected DOMs.
pub fn trace_handles(&self, tracer: &Tracer<Handle=Handle>) {
tracer.trace_handle(self.doc_handle.clone());
for e in self.open_elems.iter() {
tracer.trace_handle(e.clone());
}
for e in self.active_formatting.iter() {
match e {
&Element(ref h, _) => tracer.trace_handle(h.clone()),
_ => (),
}
}
self.head_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
self.form_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
}
// Debug helper
#[cfg(not(for_c))]
#[allow(dead_code)]
fn dump_state(&self, label: String) {
use string_cache::QualName;
println!("dump_state on {}", label);
print!(" open_elems:");
for node in self.open_elems.iter() {
let QualName { ns, local } = self.sink.elem_name(node.clone());
match ns {
ns!(HTML) => print!(" {:?}", local),
_ => panic!(),
}
}
println!("");
}
#[cfg(for_c)]
fn debug_step(&self, _mode: InsertionMode, _token: &Token) {
}
#[cfg(not(for_c))]
fn debug_step(&self, mode: InsertionMode, token: &Token) {
use util::str::to_escaped_string;
h5e_debug!("processing {} in insertion mode {:?}", to_escaped_string(token), mode);
}
fn process_to_completion(&mut self, mut token: Token) {
// Queue of additional tokens yet to be processed.
// This stays empty in the common case where we don't split whitespace.
let mut more_tokens = RingBuf::new();
loop {
let is_self_closing = match token {
TagToken(Tag { self_closing: c,.. }) => c,
_ => false,
};
let mode = self.mode;
match self.step(mode, token) {
Done => {
if is_self_closing {
self.sink.parse_error(Borrowed("Unacknowledged self-closing tag"));
}
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
DoneAckSelfClosing => {
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
Reprocess(m, t) => {
self.mode = m;
token = t;
}
SplitWhitespace(buf) => {
let buf = buf.as_slice();
let (len, is_ws) = unwrap_or_return!(
char_run(is_ascii_whitespace, buf), ());
token = CharacterTokens(
if is_ws { Whitespace } else { NotWhitespace },
String::from_str(buf.slice_to(len)));
if len < buf.len() {
more_tokens.push_back(
CharacterTokens(NotSplit, String::from_str(buf.slice_from(len))));
}
}
}
}
}
}
impl<Handle, Sink> TokenSink
for TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
fn process_token(&mut self, token: tokenizer::Token) {
let ignore_lf = replace(&mut self.ignore_lf, false);
// Handle `ParseError` and `DoctypeToken`; convert everything else to the local `Token` type.
let token = match token {
tokenizer::ParseError(e) => {
self.sink.parse_error(e);
return;
}
tokenizer::DoctypeToken(dt) => if self.mode == Initial {
let (err, quirk) = data::doctype_error_and_quirks(&dt, self.opts.iframe_srcdoc);
if err {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"Bad DOCTYPE",
"Bad DOCTYPE: {:?}", dt));
}
let Doctype { name, public_id, system_id, force_quirks: _ } = dt;
if!self.opts.drop_doctype {
self.sink.append_doctype_to_document(
name.unwrap_or(String::new()),
public_id.unwrap_or(String::new()),
system_id.unwrap_or(String::new())
);
}
self.set_quirks_mode(quirk);
self.mode = BeforeHtml;
return;
} else {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"DOCTYPE in body",
"DOCTYPE in insertion mode {:?}", self.mode));
return;
},
tokenizer::TagToken(x) => TagToken(x),
tokenizer::CommentToken(x) => CommentToken(x),
tokenizer::NullCharacterToken => NullCharacterToken,
tokenizer::EOFToken => EOFToken,
tokenizer::CharacterTokens(mut x) => {
if ignore_lf && x.len() >= 1 && x.as_slice().char_at(0) == '\n' {
x.remove(0);
}
if x.is_empty() {
return;
}
CharacterTokens(NotSplit, x)
}
};
self.process_to_completion(token);
}
fn query_state_change(&mut self) -> Option<tokenizer::states::State> {
self.next_tokenizer_state.take()
}
}
| nk_mut<' | identifier_name |
mod.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The HTML5 tree builder.
use core::prelude::*;
pub use self::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
pub use self::interface::{NodeOrText, AppendNode, AppendText};
pub use self::interface::{TreeSink, Tracer};
use self::types::*;
use self::actions::TreeBuilderActions;
use self::rules::TreeBuilderStep;
use tokenizer;
use tokenizer::{Doctype, Tag};
use tokenizer::TokenSink;
use util::str::{is_ascii_whitespace, char_run};
use core::default::Default;
use core::mem::replace;
use collections::vec::Vec;
use collections::string::String;
use std::borrow::Cow::Borrowed;
use collections::RingBuf;
#[macro_use] mod tag_sets;
mod interface;
mod data;
mod types;
mod actions;
mod rules;
/// Tree builder options, with an impl for Default.
#[derive(Copy, Clone)]
pub struct TreeBuilderOpts {
/// Report all parse errors described in the spec, at some
/// performance penalty? Default: false
pub exact_errors: bool,
/// Is scripting enabled?
pub scripting_enabled: bool,
/// Is this an `iframe srcdoc` document?
pub iframe_srcdoc: bool,
/// Are we parsing a HTML fragment?
pub fragment: bool,
/// Should we drop the DOCTYPE (if any) from the tree?
pub drop_doctype: bool,
/// The `<svg>`, `<math>`, and `<template>` tags have special
/// parsing rules that are currently unimplemented. By default
/// we `panic!()` if any of these tags is encountered. If this
/// option is enabled, we will instead attempt to parse them
/// using the ordinary HTML parsing rules.
///
/// **Warning**: This may produce extremely incorrect results
/// on some documents!
pub ignore_missing_rules: bool,
}
impl Default for TreeBuilderOpts {
fn default() -> TreeBuilderOpts {
TreeBuilderOpts {
exact_errors: false,
scripting_enabled: true,
iframe_srcdoc: false,
fragment: false,
drop_doctype: false,
ignore_missing_rules: false,
}
}
}
/// The HTML tree builder.
pub struct TreeBuilder<Handle, Sink> {
/// Options controlling the behavior of the tree builder.
opts: TreeBuilderOpts,
/// Consumer of tree modifications.
sink: Sink,
/// Insertion mode.
mode: InsertionMode,
/// Original insertion mode, used by Text and InTableText modes.
orig_mode: Option<InsertionMode>,
/// Stack of template insertion modes.
template_modes: Vec<InsertionMode>,
/// Pending table character tokens.
pending_table_text: Vec<(SplitStatus, String)>,
/// Quirks mode as set by the parser.
/// FIXME: can scripts etc. change this?
quirks_mode: QuirksMode,
/// The document node, which is created by the sink.
doc_handle: Handle,
/// Stack of open elements, most recently added at end.
open_elems: Vec<Handle>,
/// List of active formatting elements.
active_formatting: Vec<FormatEntry<Handle>>,
//§ the-element-pointers
/// Head element pointer.
head_elem: Option<Handle>,
/// Form element pointer.
form_elem: Option<Handle>,
//§ END
/// Next state change for the tokenizer, if any.
next_tokenizer_state: Option<tokenizer::states::State>,
/// Frameset-ok flag.
frameset_ok: bool,
/// Ignore a following U+000A LINE FEED?
ignore_lf: bool,
/// Is foster parenting enabled?
foster_parenting: bool,
// WARNING: If you add new fields that contain Handles, you
// must add them to trace_handles() below to preserve memory
// safety!
//
// FIXME: Auto-generate the trace hooks like Servo does.
}
impl<Handle, Sink> TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
/// Create a new tree builder which sends tree modifications to a particular `TreeSink`.
///
/// The tree builder is also a `TokenSink`.
pub fn new(mut sink: Sink, opts: TreeBuilderOpts) -> TreeBuilder<Handle, Sink> {
let doc_handle = sink.get_document();
TreeBuilder {
opts: opts,
sink: sink,
mode: Initial,
orig_mode: None,
template_modes: vec!(),
pending_table_text: vec!(),
quirks_mode: NoQuirks,
doc_handle: doc_handle,
open_elems: vec!(),
active_formatting: vec!(),
head_elem: None,
form_elem: None,
next_tokenizer_state: None,
frameset_ok: true,
ignore_lf: false,
foster_parenting: false,
}
}
pub fn unwrap(self) -> Sink {
self.sink
}
pub fn sink<'a>(&'a self) -> &'a Sink {
&self.sink
}
pub fn sink_mut<'a>(&'a mut self) -> &'a mut Sink {
&mut self.sink
}
/// Call the `Tracer`'s `trace_handle` method on every `Handle` in the tree builder's
/// internal state. This is intended to support garbage-collected DOMs.
pub fn trace_handles(&self, tracer: &Tracer<Handle=Handle>) {
tracer.trace_handle(self.doc_handle.clone());
for e in self.open_elems.iter() {
tracer.trace_handle(e.clone());
}
for e in self.active_formatting.iter() {
match e {
&Element(ref h, _) => tracer.trace_handle(h.clone()),
_ => (),
}
}
self.head_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
self.form_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
}
// Debug helper
#[cfg(not(for_c))]
#[allow(dead_code)]
fn dump_state(&self, label: String) {
use string_cache::QualName;
println!("dump_state on {}", label);
print!(" open_elems:");
for node in self.open_elems.iter() {
let QualName { ns, local } = self.sink.elem_name(node.clone());
match ns {
ns!(HTML) => print!(" {:?}", local),
_ => panic!(),
}
}
println!("");
}
#[cfg(for_c)]
fn debug_step(&self, _mode: InsertionMode, _token: &Token) {
}
#[cfg(not(for_c))]
fn debug_step(&self, mode: InsertionMode, token: &Token) {
use util::str::to_escaped_string;
h5e_debug!("processing {} in insertion mode {:?}", to_escaped_string(token), mode);
}
fn process_to_completion(&mut self, mut token: Token) {
// Queue of additional tokens yet to be processed.
// This stays empty in the common case where we don't split whitespace.
let mut more_tokens = RingBuf::new();
loop {
let is_self_closing = match token {
TagToken(Tag { self_closing: c,.. }) => c,
_ => false,
};
let mode = self.mode;
match self.step(mode, token) {
Done => {
if is_self_closing {
self.sink.parse_error(Borrowed("Unacknowledged self-closing tag"));
}
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
DoneAckSelfClosing => {
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
Reprocess(m, t) => {
self.mode = m;
token = t;
}
SplitWhitespace(buf) => {
let buf = buf.as_slice();
let (len, is_ws) = unwrap_or_return!(
char_run(is_ascii_whitespace, buf), ());
token = CharacterTokens(
if is_ws { Whitespace } else { NotWhitespace },
String::from_str(buf.slice_to(len)));
if len < buf.len() {
more_tokens.push_back(
CharacterTokens(NotSplit, String::from_str(buf.slice_from(len))));
}
}
} |
impl<Handle, Sink> TokenSink
for TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
fn process_token(&mut self, token: tokenizer::Token) {
let ignore_lf = replace(&mut self.ignore_lf, false);
// Handle `ParseError` and `DoctypeToken`; convert everything else to the local `Token` type.
let token = match token {
tokenizer::ParseError(e) => {
self.sink.parse_error(e);
return;
}
tokenizer::DoctypeToken(dt) => if self.mode == Initial {
let (err, quirk) = data::doctype_error_and_quirks(&dt, self.opts.iframe_srcdoc);
if err {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"Bad DOCTYPE",
"Bad DOCTYPE: {:?}", dt));
}
let Doctype { name, public_id, system_id, force_quirks: _ } = dt;
if!self.opts.drop_doctype {
self.sink.append_doctype_to_document(
name.unwrap_or(String::new()),
public_id.unwrap_or(String::new()),
system_id.unwrap_or(String::new())
);
}
self.set_quirks_mode(quirk);
self.mode = BeforeHtml;
return;
} else {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"DOCTYPE in body",
"DOCTYPE in insertion mode {:?}", self.mode));
return;
},
tokenizer::TagToken(x) => TagToken(x),
tokenizer::CommentToken(x) => CommentToken(x),
tokenizer::NullCharacterToken => NullCharacterToken,
tokenizer::EOFToken => EOFToken,
tokenizer::CharacterTokens(mut x) => {
if ignore_lf && x.len() >= 1 && x.as_slice().char_at(0) == '\n' {
x.remove(0);
}
if x.is_empty() {
return;
}
CharacterTokens(NotSplit, x)
}
};
self.process_to_completion(token);
}
fn query_state_change(&mut self) -> Option<tokenizer::states::State> {
self.next_tokenizer_state.take()
}
} | }
}
} | random_line_split |
mod.rs | // Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The HTML5 tree builder.
use core::prelude::*;
pub use self::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
pub use self::interface::{NodeOrText, AppendNode, AppendText};
pub use self::interface::{TreeSink, Tracer};
use self::types::*;
use self::actions::TreeBuilderActions;
use self::rules::TreeBuilderStep;
use tokenizer;
use tokenizer::{Doctype, Tag};
use tokenizer::TokenSink;
use util::str::{is_ascii_whitespace, char_run};
use core::default::Default;
use core::mem::replace;
use collections::vec::Vec;
use collections::string::String;
use std::borrow::Cow::Borrowed;
use collections::RingBuf;
#[macro_use] mod tag_sets;
mod interface;
mod data;
mod types;
mod actions;
mod rules;
/// Tree builder options, with an impl for Default.
#[derive(Copy, Clone)]
pub struct TreeBuilderOpts {
/// Report all parse errors described in the spec, at some
/// performance penalty? Default: false
pub exact_errors: bool,
/// Is scripting enabled?
pub scripting_enabled: bool,
/// Is this an `iframe srcdoc` document?
pub iframe_srcdoc: bool,
/// Are we parsing a HTML fragment?
pub fragment: bool,
/// Should we drop the DOCTYPE (if any) from the tree?
pub drop_doctype: bool,
/// The `<svg>`, `<math>`, and `<template>` tags have special
/// parsing rules that are currently unimplemented. By default
/// we `panic!()` if any of these tags is encountered. If this
/// option is enabled, we will instead attempt to parse them
/// using the ordinary HTML parsing rules.
///
/// **Warning**: This may produce extremely incorrect results
/// on some documents!
pub ignore_missing_rules: bool,
}
impl Default for TreeBuilderOpts {
fn default() -> TreeBuilderOpts {
TreeBuilderOpts {
exact_errors: false,
scripting_enabled: true,
iframe_srcdoc: false,
fragment: false,
drop_doctype: false,
ignore_missing_rules: false,
}
}
}
/// The HTML tree builder.
pub struct TreeBuilder<Handle, Sink> {
/// Options controlling the behavior of the tree builder.
opts: TreeBuilderOpts,
/// Consumer of tree modifications.
sink: Sink,
/// Insertion mode.
mode: InsertionMode,
/// Original insertion mode, used by Text and InTableText modes.
orig_mode: Option<InsertionMode>,
/// Stack of template insertion modes.
template_modes: Vec<InsertionMode>,
/// Pending table character tokens.
pending_table_text: Vec<(SplitStatus, String)>,
/// Quirks mode as set by the parser.
/// FIXME: can scripts etc. change this?
quirks_mode: QuirksMode,
/// The document node, which is created by the sink.
doc_handle: Handle,
/// Stack of open elements, most recently added at end.
open_elems: Vec<Handle>,
/// List of active formatting elements.
active_formatting: Vec<FormatEntry<Handle>>,
//§ the-element-pointers
/// Head element pointer.
head_elem: Option<Handle>,
/// Form element pointer.
form_elem: Option<Handle>,
//§ END
/// Next state change for the tokenizer, if any.
next_tokenizer_state: Option<tokenizer::states::State>,
/// Frameset-ok flag.
frameset_ok: bool,
/// Ignore a following U+000A LINE FEED?
ignore_lf: bool,
/// Is foster parenting enabled?
foster_parenting: bool,
// WARNING: If you add new fields that contain Handles, you
// must add them to trace_handles() below to preserve memory
// safety!
//
// FIXME: Auto-generate the trace hooks like Servo does.
}
impl<Handle, Sink> TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
/// Create a new tree builder which sends tree modifications to a particular `TreeSink`.
///
/// The tree builder is also a `TokenSink`.
pub fn new(mut sink: Sink, opts: TreeBuilderOpts) -> TreeBuilder<Handle, Sink> {
let doc_handle = sink.get_document();
TreeBuilder {
opts: opts,
sink: sink,
mode: Initial,
orig_mode: None,
template_modes: vec!(),
pending_table_text: vec!(),
quirks_mode: NoQuirks,
doc_handle: doc_handle,
open_elems: vec!(),
active_formatting: vec!(),
head_elem: None,
form_elem: None,
next_tokenizer_state: None,
frameset_ok: true,
ignore_lf: false,
foster_parenting: false,
}
}
pub fn unwrap(self) -> Sink {
self.sink
}
pub fn sink<'a>(&'a self) -> &'a Sink {
&self.sink
}
pub fn sink_mut<'a>(&'a mut self) -> &'a mut Sink {
&mut self.sink
}
/// Call the `Tracer`'s `trace_handle` method on every `Handle` in the tree builder's
/// internal state. This is intended to support garbage-collected DOMs.
pub fn trace_handles(&self, tracer: &Tracer<Handle=Handle>) {
tracer.trace_handle(self.doc_handle.clone());
for e in self.open_elems.iter() {
tracer.trace_handle(e.clone());
}
for e in self.active_formatting.iter() {
match e {
&Element(ref h, _) => tracer.trace_handle(h.clone()),
_ => (),
}
}
self.head_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
self.form_elem.as_ref().map(|h| tracer.trace_handle(h.clone()));
}
// Debug helper
#[cfg(not(for_c))]
#[allow(dead_code)]
fn dump_state(&self, label: String) {
use string_cache::QualName;
println!("dump_state on {}", label);
print!(" open_elems:");
for node in self.open_elems.iter() {
let QualName { ns, local } = self.sink.elem_name(node.clone());
match ns {
ns!(HTML) => print!(" {:?}", local),
_ => panic!(),
}
}
println!("");
}
#[cfg(for_c)]
fn debug_step(&self, _mode: InsertionMode, _token: &Token) {
}
#[cfg(not(for_c))]
fn debug_step(&self, mode: InsertionMode, token: &Token) {
use util::str::to_escaped_string;
h5e_debug!("processing {} in insertion mode {:?}", to_escaped_string(token), mode);
}
fn process_to_completion(&mut self, mut token: Token) {
// Queue of additional tokens yet to be processed.
// This stays empty in the common case where we don't split whitespace.
let mut more_tokens = RingBuf::new();
loop {
let is_self_closing = match token {
TagToken(Tag { self_closing: c,.. }) => c,
_ => false,
};
let mode = self.mode;
match self.step(mode, token) {
Done => {
if is_self_closing {
self.sink.parse_error(Borrowed("Unacknowledged self-closing tag"));
}
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
DoneAckSelfClosing => {
token = unwrap_or_return!(more_tokens.pop_front(), ());
}
Reprocess(m, t) => {
self.mode = m;
token = t;
}
SplitWhitespace(buf) => {
let buf = buf.as_slice();
let (len, is_ws) = unwrap_or_return!(
char_run(is_ascii_whitespace, buf), ());
token = CharacterTokens(
if is_ws { Whitespace } else { NotWhitespace },
String::from_str(buf.slice_to(len)));
if len < buf.len() {
more_tokens.push_back(
CharacterTokens(NotSplit, String::from_str(buf.slice_from(len))));
}
}
}
}
}
}
impl<Handle, Sink> TokenSink
for TreeBuilder<Handle, Sink>
where Handle: Clone,
Sink: TreeSink<Handle=Handle>,
{
fn process_token(&mut self, token: tokenizer::Token) {
let ignore_lf = replace(&mut self.ignore_lf, false);
// Handle `ParseError` and `DoctypeToken`; convert everything else to the local `Token` type.
let token = match token {
tokenizer::ParseError(e) => {
self.sink.parse_error(e);
return;
}
tokenizer::DoctypeToken(dt) => if self.mode == Initial {
let (err, quirk) = data::doctype_error_and_quirks(&dt, self.opts.iframe_srcdoc);
if err {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"Bad DOCTYPE",
"Bad DOCTYPE: {:?}", dt));
}
let Doctype { name, public_id, system_id, force_quirks: _ } = dt;
if!self.opts.drop_doctype {
self.sink.append_doctype_to_document(
name.unwrap_or(String::new()),
public_id.unwrap_or(String::new()),
system_id.unwrap_or(String::new())
);
}
self.set_quirks_mode(quirk);
self.mode = BeforeHtml;
return;
} else {
self.sink.parse_error(format_if!(
self.opts.exact_errors,
"DOCTYPE in body",
"DOCTYPE in insertion mode {:?}", self.mode));
return;
},
tokenizer::TagToken(x) => TagToken(x),
tokenizer::CommentToken(x) => CommentToken(x),
tokenizer::NullCharacterToken => NullCharacterToken,
tokenizer::EOFToken => EOFToken,
tokenizer::CharacterTokens(mut x) => {
if ignore_lf && x.len() >= 1 && x.as_slice().char_at(0) == '\n' {
x.remove(0);
}
if x.is_empty() {
return;
}
CharacterTokens(NotSplit, x)
}
};
self.process_to_completion(token);
}
fn query_state_change(&mut self) -> Option<tokenizer::states::State> {
| self.next_tokenizer_state.take()
}
} | identifier_body |
|
mime.rs | use std::{error::Error as StdError, io::Write};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use mime::Mime as OrigMime;
#[derive(AsExpression, Debug, FromSqlRow)]
#[sql_type = "Text"]
pub struct Mime(pub OrigMime);
impl<DB> serialize::ToSql<Text, DB> for Mime
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(self.0.as_ref(), out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Mime
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).and_then(|s: String| {
s.parse()
.map(Mime)
.map_err(|e| Box::new(e) as Box<StdError + Send + Sync>)
})
}
}
impl From<OrigMime> for Mime {
fn from(u: OrigMime) -> Self |
}
| {
Mime(u)
} | identifier_body |
mime.rs | use std::{error::Error as StdError, io::Write};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use mime::Mime as OrigMime;
#[derive(AsExpression, Debug, FromSqlRow)]
#[sql_type = "Text"]
pub struct Mime(pub OrigMime);
impl<DB> serialize::ToSql<Text, DB> for Mime
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(self.0.as_ref(), out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Mime
where
DB: Backend<RawValue = [u8]>,
{ | })
}
}
impl From<OrigMime> for Mime {
fn from(u: OrigMime) -> Self {
Mime(u)
}
} | fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).and_then(|s: String| {
s.parse()
.map(Mime)
.map_err(|e| Box::new(e) as Box<StdError + Send + Sync>) | random_line_split |
mime.rs | use std::{error::Error as StdError, io::Write};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
use mime::Mime as OrigMime;
#[derive(AsExpression, Debug, FromSqlRow)]
#[sql_type = "Text"]
pub struct Mime(pub OrigMime);
impl<DB> serialize::ToSql<Text, DB> for Mime
where
DB: Backend,
{
fn | <W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(self.0.as_ref(), out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Mime
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).and_then(|s: String| {
s.parse()
.map(Mime)
.map_err(|e| Box::new(e) as Box<StdError + Send + Sync>)
})
}
}
impl From<OrigMime> for Mime {
fn from(u: OrigMime) -> Self {
Mime(u)
}
}
| to_sql | identifier_name |
pagetransitionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding::PageTransitionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
// https://html.spec.whatwg.org/multipage/#pagetransitionevent
#[dom_struct]
pub struct PageTransitionEvent {
event: Event,
persisted: Cell<bool>,
}
impl PageTransitionEvent {
fn new_inherited() -> PageTransitionEvent {
PageTransitionEvent {
event: Event::new_inherited(),
persisted: Cell::new(false),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<PageTransitionEvent> {
reflect_dom_object(
Box::new(PageTransitionEvent::new_inherited()),
window,
PageTransitionEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: bool,
cancelable: bool,
persisted: bool,
) -> DomRoot<PageTransitionEvent> {
let ev = PageTransitionEvent::new_uninitialized(window);
ev.persisted.set(persisted);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev
}
#[allow(non_snake_case)]
pub fn Constructor( | type_: DOMString,
init: &PageTransitionEventBinding::PageTransitionEventInit,
) -> Fallible<DomRoot<PageTransitionEvent>> {
Ok(PageTransitionEvent::new(
window,
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
init.persisted,
))
}
}
impl PageTransitionEventMethods for PageTransitionEvent {
// https://html.spec.whatwg.org/multipage/#dom-pagetransitionevent-persisted
fn Persisted(&self) -> bool {
self.persisted.get()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
} | window: &Window, | random_line_split |
pagetransitionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding::PageTransitionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
// https://html.spec.whatwg.org/multipage/#pagetransitionevent
#[dom_struct]
pub struct PageTransitionEvent {
event: Event,
persisted: Cell<bool>,
}
impl PageTransitionEvent {
fn new_inherited() -> PageTransitionEvent {
PageTransitionEvent {
event: Event::new_inherited(),
persisted: Cell::new(false),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<PageTransitionEvent> {
reflect_dom_object(
Box::new(PageTransitionEvent::new_inherited()),
window,
PageTransitionEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: bool,
cancelable: bool,
persisted: bool,
) -> DomRoot<PageTransitionEvent> {
let ev = PageTransitionEvent::new_uninitialized(window);
ev.persisted.set(persisted);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &PageTransitionEventBinding::PageTransitionEventInit,
) -> Fallible<DomRoot<PageTransitionEvent>> {
Ok(PageTransitionEvent::new(
window,
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
init.persisted,
))
}
}
impl PageTransitionEventMethods for PageTransitionEvent {
// https://html.spec.whatwg.org/multipage/#dom-pagetransitionevent-persisted
fn Persisted(&self) -> bool {
self.persisted.get()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn | (&self) -> bool {
self.event.IsTrusted()
}
}
| IsTrusted | identifier_name |
pagetransitionevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding;
use crate::dom::bindings::codegen::Bindings::PageTransitionEventBinding::PageTransitionEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
// https://html.spec.whatwg.org/multipage/#pagetransitionevent
#[dom_struct]
pub struct PageTransitionEvent {
event: Event,
persisted: Cell<bool>,
}
impl PageTransitionEvent {
fn new_inherited() -> PageTransitionEvent {
PageTransitionEvent {
event: Event::new_inherited(),
persisted: Cell::new(false),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<PageTransitionEvent> {
reflect_dom_object(
Box::new(PageTransitionEvent::new_inherited()),
window,
PageTransitionEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: Atom,
bubbles: bool,
cancelable: bool,
persisted: bool,
) -> DomRoot<PageTransitionEvent> {
let ev = PageTransitionEvent::new_uninitialized(window);
ev.persisted.set(persisted);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &PageTransitionEventBinding::PageTransitionEventInit,
) -> Fallible<DomRoot<PageTransitionEvent>> |
}
impl PageTransitionEventMethods for PageTransitionEvent {
// https://html.spec.whatwg.org/multipage/#dom-pagetransitionevent-persisted
fn Persisted(&self) -> bool {
self.persisted.get()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| {
Ok(PageTransitionEvent::new(
window,
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
init.persisted,
))
} | identifier_body |
exec.rs | use std::{env,
ffi::OsString,
io,
path::PathBuf};
use crate::hcore::{fs::{find_command,
FS_ROOT_PATH},
os::process,
package::{PackageIdent,
PackageInstall}};
use crate::error::{Error,
Result};
const PATH_KEY: &str = "PATH";
pub fn start<T>(ident: &PackageIdent, command: T, args: &[OsString]) -> Result<()>
where T: Into<PathBuf>
| for (key, value) in cmd_env.into_iter() {
debug!("Setting: {}='{}'", key, value);
env::set_var(key, value);
}
let command = match find_command(&command) {
Some(path) => path,
None => return Err(Error::ExecCommandNotFound(command)),
};
let mut display_args = command.to_string_lossy().into_owned();
for arg in args {
display_args.push(' ');
display_args.push_str(arg.to_string_lossy().as_ref());
}
debug!("Running: {}", display_args);
process::become_command(command, args)?;
Ok(())
}
| {
let command = command.into();
let pkg_install = PackageInstall::load(&ident, Some(&*FS_ROOT_PATH))?;
let mut cmd_env = pkg_install.environment_for_command()?;
if let Some(path) = cmd_env.get(PATH_KEY) {
if let Some(val) = env::var_os(PATH_KEY) {
let mut paths: Vec<PathBuf> = env::split_paths(&path).collect();
let mut os_paths = env::split_paths(&val).collect();
paths.append(&mut os_paths);
let joined = env::join_paths(paths)?;
let path_str =
joined.into_string()
.map_err(|s| {
io::Error::new(io::ErrorKind::InvalidData, s.to_string_lossy())
})?;
cmd_env.insert(PATH_KEY.to_string(), path_str);
}
}
| identifier_body |
exec.rs | use std::{env,
ffi::OsString,
io,
path::PathBuf};
use crate::hcore::{fs::{find_command,
FS_ROOT_PATH},
os::process,
package::{PackageIdent,
PackageInstall}};
use crate::error::{Error,
Result};
const PATH_KEY: &str = "PATH";
pub fn start<T>(ident: &PackageIdent, command: T, args: &[OsString]) -> Result<()>
where T: Into<PathBuf>
{
let command = command.into();
let pkg_install = PackageInstall::load(&ident, Some(&*FS_ROOT_PATH))?;
let mut cmd_env = pkg_install.environment_for_command()?;
if let Some(path) = cmd_env.get(PATH_KEY) |
for (key, value) in cmd_env.into_iter() {
debug!("Setting: {}='{}'", key, value);
env::set_var(key, value);
}
let command = match find_command(&command) {
Some(path) => path,
None => return Err(Error::ExecCommandNotFound(command)),
};
let mut display_args = command.to_string_lossy().into_owned();
for arg in args {
display_args.push(' ');
display_args.push_str(arg.to_string_lossy().as_ref());
}
debug!("Running: {}", display_args);
process::become_command(command, args)?;
Ok(())
}
| {
if let Some(val) = env::var_os(PATH_KEY) {
let mut paths: Vec<PathBuf> = env::split_paths(&path).collect();
let mut os_paths = env::split_paths(&val).collect();
paths.append(&mut os_paths);
let joined = env::join_paths(paths)?;
let path_str =
joined.into_string()
.map_err(|s| {
io::Error::new(io::ErrorKind::InvalidData, s.to_string_lossy())
})?;
cmd_env.insert(PATH_KEY.to_string(), path_str);
}
} | conditional_block |
exec.rs | use std::{env,
ffi::OsString,
io,
path::PathBuf};
use crate::hcore::{fs::{find_command,
FS_ROOT_PATH},
os::process,
package::{PackageIdent,
PackageInstall}};
use crate::error::{Error,
Result};
const PATH_KEY: &str = "PATH";
pub fn start<T>(ident: &PackageIdent, command: T, args: &[OsString]) -> Result<()>
where T: Into<PathBuf>
{
let command = command.into();
let pkg_install = PackageInstall::load(&ident, Some(&*FS_ROOT_PATH))?;
let mut cmd_env = pkg_install.environment_for_command()?;
| let mut paths: Vec<PathBuf> = env::split_paths(&path).collect();
let mut os_paths = env::split_paths(&val).collect();
paths.append(&mut os_paths);
let joined = env::join_paths(paths)?;
let path_str =
joined.into_string()
.map_err(|s| {
io::Error::new(io::ErrorKind::InvalidData, s.to_string_lossy())
})?;
cmd_env.insert(PATH_KEY.to_string(), path_str);
}
}
for (key, value) in cmd_env.into_iter() {
debug!("Setting: {}='{}'", key, value);
env::set_var(key, value);
}
let command = match find_command(&command) {
Some(path) => path,
None => return Err(Error::ExecCommandNotFound(command)),
};
let mut display_args = command.to_string_lossy().into_owned();
for arg in args {
display_args.push(' ');
display_args.push_str(arg.to_string_lossy().as_ref());
}
debug!("Running: {}", display_args);
process::become_command(command, args)?;
Ok(())
} | if let Some(path) = cmd_env.get(PATH_KEY) {
if let Some(val) = env::var_os(PATH_KEY) { | random_line_split |
exec.rs | use std::{env,
ffi::OsString,
io,
path::PathBuf};
use crate::hcore::{fs::{find_command,
FS_ROOT_PATH},
os::process,
package::{PackageIdent,
PackageInstall}};
use crate::error::{Error,
Result};
const PATH_KEY: &str = "PATH";
pub fn | <T>(ident: &PackageIdent, command: T, args: &[OsString]) -> Result<()>
where T: Into<PathBuf>
{
let command = command.into();
let pkg_install = PackageInstall::load(&ident, Some(&*FS_ROOT_PATH))?;
let mut cmd_env = pkg_install.environment_for_command()?;
if let Some(path) = cmd_env.get(PATH_KEY) {
if let Some(val) = env::var_os(PATH_KEY) {
let mut paths: Vec<PathBuf> = env::split_paths(&path).collect();
let mut os_paths = env::split_paths(&val).collect();
paths.append(&mut os_paths);
let joined = env::join_paths(paths)?;
let path_str =
joined.into_string()
.map_err(|s| {
io::Error::new(io::ErrorKind::InvalidData, s.to_string_lossy())
})?;
cmd_env.insert(PATH_KEY.to_string(), path_str);
}
}
for (key, value) in cmd_env.into_iter() {
debug!("Setting: {}='{}'", key, value);
env::set_var(key, value);
}
let command = match find_command(&command) {
Some(path) => path,
None => return Err(Error::ExecCommandNotFound(command)),
};
let mut display_args = command.to_string_lossy().into_owned();
for arg in args {
display_args.push(' ');
display_args.push_str(arg.to_string_lossy().as_ref());
}
debug!("Running: {}", display_args);
process::become_command(command, args)?;
Ok(())
}
| start | identifier_name |
pact_support.rs | use http::{HeaderMap, Uri, Error};
use http::header::{ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE};
use http::header::HeaderValue;
use http::request::Parts;
use hyper::{Body, Response as HyperResponse};
use pact_matching::models::{HttpPart, OptionalBody, Request, Response};
use pact_matching::models::parse_query_string;
use std::collections::HashMap;
use log::*;
use pact_matching::s;
use pact_matching::models::content_types::TEXT;
fn extract_query_string(uri: &Uri) -> Option<HashMap<String, Vec<String>>> {
match uri.query() {
Some(q) => parse_query_string(&s!(q)),
None => None
}
}
fn extract_headers(headers: &HeaderMap<HeaderValue>) -> Option<HashMap<String, Vec<String>>> {
if!headers.is_empty() {
let result: HashMap<String, Vec<String>> = headers.keys()
.map(|name| {
let values = headers.get_all(name);
let parsed_vals: Vec<Result<String, ()>> = values.iter()
.map(|val| val.to_str()
.map(|v| v.to_string())
.map_err(|err| {
warn!("Failed to parse HTTP header value: {}", err);
})
).collect();
(name.as_str().into(), parsed_vals.iter().cloned()
.filter(|val| val.is_ok())
.map(|val| val.unwrap_or_default())
.collect())
})
.collect();
Some(result)
} else {
None
}
}
pub fn hyper_request_to_pact_request(req: Parts, body: OptionalBody) -> Request {
Request {
method: req.method.to_string(),
path: req.uri.path().to_string(),
query: extract_query_string(&req.uri),
headers: extract_headers(&req.headers),
body,
.. Request::default()
}
}
pub fn pact_response_to_hyper_response(response: &Response) -> Result<HyperResponse<Body>, Error> {
info!("<=== Sending {}", response);
debug!(" body: '{}'", response.body.str_value());
debug!(" matching_rules: {:?}", response.matching_rules);
debug!(" generators: {:?}", response.generators);
let mut res = HyperResponse::builder().status(response.status);
if let Some(headers) = &response.headers {
for (k, v) in headers.clone() {
for val in v {
res = res.header(k.as_str(), val);
}
}
}
let allow_origin = ACCESS_CONTROL_ALLOW_ORIGIN;
if!response.has_header(allow_origin.as_str()) |
match &response.body {
OptionalBody::Present(ref body, content_type) => {
let content_type_header = CONTENT_TYPE;
if!response.has_header(content_type_header.as_str()) {
let content_type = content_type.clone()
.unwrap_or_else(|| response.content_type().unwrap_or_else(|| TEXT.clone()));
res = res.header(content_type_header, content_type.to_string());
}
res.body(Body::from(body.clone()))
},
_ => res.body(Body::empty())
}
}
#[cfg(test)]
mod test {
use expectest::prelude::*;
use http::header::HeaderValue;
use http::status::StatusCode;
use pact_matching::models::{OptionalBody, Response};
use super::*;
use maplit::*;
#[test]
fn test_response() {
let response = Response {
status: 201,
headers: Some(hashmap! { }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("*")));
}
#[test]
fn test_response_with_content_type() {
let response = Response {
status: 201,
headers: Some(hashmap! { s!("Content-Type") => vec![s!("text/dizzy")] }),
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("text/dizzy")));
}
#[test]
fn adds_a_content_type_if_there_is_not_one_and_there_is_a_body() {
let response = Response {
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("application/json")));
}
#[test]
fn only_add_a_cors_origin_header_if_one_has_not_already_been_provided() {
let response = Response {
headers: Some(hashmap! { s!("Access-Control-Allow-Origin") => vec![s!("dodgy.com")] }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("dodgy.com")));
}
}
| {
res = res.header(allow_origin, "*");
} | conditional_block |
pact_support.rs | use http::{HeaderMap, Uri, Error};
use http::header::{ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE};
use http::header::HeaderValue;
use http::request::Parts;
use hyper::{Body, Response as HyperResponse};
use pact_matching::models::{HttpPart, OptionalBody, Request, Response};
use pact_matching::models::parse_query_string;
use std::collections::HashMap;
use log::*;
use pact_matching::s;
use pact_matching::models::content_types::TEXT;
fn extract_query_string(uri: &Uri) -> Option<HashMap<String, Vec<String>>> {
match uri.query() {
Some(q) => parse_query_string(&s!(q)),
None => None
}
}
fn extract_headers(headers: &HeaderMap<HeaderValue>) -> Option<HashMap<String, Vec<String>>> {
if!headers.is_empty() {
let result: HashMap<String, Vec<String>> = headers.keys()
.map(|name| {
let values = headers.get_all(name);
let parsed_vals: Vec<Result<String, ()>> = values.iter()
.map(|val| val.to_str()
.map(|v| v.to_string())
.map_err(|err| {
warn!("Failed to parse HTTP header value: {}", err);
})
).collect();
(name.as_str().into(), parsed_vals.iter().cloned()
.filter(|val| val.is_ok())
.map(|val| val.unwrap_or_default())
.collect())
})
.collect();
Some(result)
} else {
None
}
}
pub fn | (req: Parts, body: OptionalBody) -> Request {
Request {
method: req.method.to_string(),
path: req.uri.path().to_string(),
query: extract_query_string(&req.uri),
headers: extract_headers(&req.headers),
body,
.. Request::default()
}
}
pub fn pact_response_to_hyper_response(response: &Response) -> Result<HyperResponse<Body>, Error> {
info!("<=== Sending {}", response);
debug!(" body: '{}'", response.body.str_value());
debug!(" matching_rules: {:?}", response.matching_rules);
debug!(" generators: {:?}", response.generators);
let mut res = HyperResponse::builder().status(response.status);
if let Some(headers) = &response.headers {
for (k, v) in headers.clone() {
for val in v {
res = res.header(k.as_str(), val);
}
}
}
let allow_origin = ACCESS_CONTROL_ALLOW_ORIGIN;
if!response.has_header(allow_origin.as_str()) {
res = res.header(allow_origin, "*");
}
match &response.body {
OptionalBody::Present(ref body, content_type) => {
let content_type_header = CONTENT_TYPE;
if!response.has_header(content_type_header.as_str()) {
let content_type = content_type.clone()
.unwrap_or_else(|| response.content_type().unwrap_or_else(|| TEXT.clone()));
res = res.header(content_type_header, content_type.to_string());
}
res.body(Body::from(body.clone()))
},
_ => res.body(Body::empty())
}
}
#[cfg(test)]
mod test {
use expectest::prelude::*;
use http::header::HeaderValue;
use http::status::StatusCode;
use pact_matching::models::{OptionalBody, Response};
use super::*;
use maplit::*;
#[test]
fn test_response() {
let response = Response {
status: 201,
headers: Some(hashmap! { }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("*")));
}
#[test]
fn test_response_with_content_type() {
let response = Response {
status: 201,
headers: Some(hashmap! { s!("Content-Type") => vec![s!("text/dizzy")] }),
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("text/dizzy")));
}
#[test]
fn adds_a_content_type_if_there_is_not_one_and_there_is_a_body() {
let response = Response {
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("application/json")));
}
#[test]
fn only_add_a_cors_origin_header_if_one_has_not_already_been_provided() {
let response = Response {
headers: Some(hashmap! { s!("Access-Control-Allow-Origin") => vec![s!("dodgy.com")] }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("dodgy.com")));
}
}
| hyper_request_to_pact_request | identifier_name |
pact_support.rs | use http::{HeaderMap, Uri, Error};
use http::header::{ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE};
use http::header::HeaderValue;
use http::request::Parts;
use hyper::{Body, Response as HyperResponse};
use pact_matching::models::{HttpPart, OptionalBody, Request, Response};
use pact_matching::models::parse_query_string;
use std::collections::HashMap;
use log::*;
use pact_matching::s;
use pact_matching::models::content_types::TEXT;
fn extract_query_string(uri: &Uri) -> Option<HashMap<String, Vec<String>>> {
match uri.query() {
Some(q) => parse_query_string(&s!(q)),
None => None
}
}
fn extract_headers(headers: &HeaderMap<HeaderValue>) -> Option<HashMap<String, Vec<String>>> {
if!headers.is_empty() {
let result: HashMap<String, Vec<String>> = headers.keys()
.map(|name| {
let values = headers.get_all(name);
let parsed_vals: Vec<Result<String, ()>> = values.iter()
.map(|val| val.to_str()
.map(|v| v.to_string())
.map_err(|err| {
warn!("Failed to parse HTTP header value: {}", err);
})
).collect();
(name.as_str().into(), parsed_vals.iter().cloned()
.filter(|val| val.is_ok())
.map(|val| val.unwrap_or_default())
.collect())
})
.collect();
Some(result)
} else {
None
}
}
pub fn hyper_request_to_pact_request(req: Parts, body: OptionalBody) -> Request {
Request {
method: req.method.to_string(),
path: req.uri.path().to_string(),
query: extract_query_string(&req.uri),
headers: extract_headers(&req.headers),
body,
.. Request::default()
}
}
pub fn pact_response_to_hyper_response(response: &Response) -> Result<HyperResponse<Body>, Error> {
info!("<=== Sending {}", response);
debug!(" body: '{}'", response.body.str_value());
debug!(" matching_rules: {:?}", response.matching_rules);
debug!(" generators: {:?}", response.generators);
let mut res = HyperResponse::builder().status(response.status);
if let Some(headers) = &response.headers {
for (k, v) in headers.clone() {
for val in v {
res = res.header(k.as_str(), val);
}
}
}
let allow_origin = ACCESS_CONTROL_ALLOW_ORIGIN;
if!response.has_header(allow_origin.as_str()) {
res = res.header(allow_origin, "*");
}
match &response.body {
OptionalBody::Present(ref body, content_type) => {
let content_type_header = CONTENT_TYPE;
if!response.has_header(content_type_header.as_str()) {
let content_type = content_type.clone()
.unwrap_or_else(|| response.content_type().unwrap_or_else(|| TEXT.clone()));
res = res.header(content_type_header, content_type.to_string());
}
res.body(Body::from(body.clone()))
},
_ => res.body(Body::empty())
}
}
#[cfg(test)]
mod test {
use expectest::prelude::*;
use http::header::HeaderValue;
use http::status::StatusCode;
use pact_matching::models::{OptionalBody, Response};
use super::*;
use maplit::*;
#[test]
fn test_response() {
let response = Response {
status: 201,
headers: Some(hashmap! { }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("*")));
}
#[test]
fn test_response_with_content_type() {
let response = Response {
status: 201,
headers: Some(hashmap! { s!("Content-Type") => vec![s!("text/dizzy")] }),
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("text/dizzy")));
}
#[test]
fn adds_a_content_type_if_there_is_not_one_and_there_is_a_body() {
let response = Response {
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
}; |
#[test]
fn only_add_a_cors_origin_header_if_one_has_not_already_been_provided() {
let response = Response {
headers: Some(hashmap! { s!("Access-Control-Allow-Origin") => vec![s!("dodgy.com")] }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("dodgy.com")));
}
} | let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("application/json")));
} | random_line_split |
pact_support.rs | use http::{HeaderMap, Uri, Error};
use http::header::{ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE};
use http::header::HeaderValue;
use http::request::Parts;
use hyper::{Body, Response as HyperResponse};
use pact_matching::models::{HttpPart, OptionalBody, Request, Response};
use pact_matching::models::parse_query_string;
use std::collections::HashMap;
use log::*;
use pact_matching::s;
use pact_matching::models::content_types::TEXT;
fn extract_query_string(uri: &Uri) -> Option<HashMap<String, Vec<String>>> {
match uri.query() {
Some(q) => parse_query_string(&s!(q)),
None => None
}
}
fn extract_headers(headers: &HeaderMap<HeaderValue>) -> Option<HashMap<String, Vec<String>>> {
if!headers.is_empty() {
let result: HashMap<String, Vec<String>> = headers.keys()
.map(|name| {
let values = headers.get_all(name);
let parsed_vals: Vec<Result<String, ()>> = values.iter()
.map(|val| val.to_str()
.map(|v| v.to_string())
.map_err(|err| {
warn!("Failed to parse HTTP header value: {}", err);
})
).collect();
(name.as_str().into(), parsed_vals.iter().cloned()
.filter(|val| val.is_ok())
.map(|val| val.unwrap_or_default())
.collect())
})
.collect();
Some(result)
} else {
None
}
}
pub fn hyper_request_to_pact_request(req: Parts, body: OptionalBody) -> Request {
Request {
method: req.method.to_string(),
path: req.uri.path().to_string(),
query: extract_query_string(&req.uri),
headers: extract_headers(&req.headers),
body,
.. Request::default()
}
}
pub fn pact_response_to_hyper_response(response: &Response) -> Result<HyperResponse<Body>, Error> {
info!("<=== Sending {}", response);
debug!(" body: '{}'", response.body.str_value());
debug!(" matching_rules: {:?}", response.matching_rules);
debug!(" generators: {:?}", response.generators);
let mut res = HyperResponse::builder().status(response.status);
if let Some(headers) = &response.headers {
for (k, v) in headers.clone() {
for val in v {
res = res.header(k.as_str(), val);
}
}
}
let allow_origin = ACCESS_CONTROL_ALLOW_ORIGIN;
if!response.has_header(allow_origin.as_str()) {
res = res.header(allow_origin, "*");
}
match &response.body {
OptionalBody::Present(ref body, content_type) => {
let content_type_header = CONTENT_TYPE;
if!response.has_header(content_type_header.as_str()) {
let content_type = content_type.clone()
.unwrap_or_else(|| response.content_type().unwrap_or_else(|| TEXT.clone()));
res = res.header(content_type_header, content_type.to_string());
}
res.body(Body::from(body.clone()))
},
_ => res.body(Body::empty())
}
}
#[cfg(test)]
mod test {
use expectest::prelude::*;
use http::header::HeaderValue;
use http::status::StatusCode;
use pact_matching::models::{OptionalBody, Response};
use super::*;
use maplit::*;
#[test]
fn test_response() {
let response = Response {
status: 201,
headers: Some(hashmap! { }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("*")));
}
#[test]
fn test_response_with_content_type() {
let response = Response {
status: 201,
headers: Some(hashmap! { s!("Content-Type") => vec![s!("text/dizzy")] }),
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.status()).to(be_equal_to(StatusCode::CREATED));
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("text/dizzy")));
}
#[test]
fn adds_a_content_type_if_there_is_not_one_and_there_is_a_body() {
let response = Response {
body: OptionalBody::Present("{\"a\": 1, \"b\": 4, \"c\": 6}".as_bytes().into(), None),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().is_empty()).to(be_false());
expect!(hyper_response.headers().get("content-type")).to(be_some().value(HeaderValue::from_static("application/json")));
}
#[test]
fn only_add_a_cors_origin_header_if_one_has_not_already_been_provided() |
}
| {
let response = Response {
headers: Some(hashmap! { s!("Access-Control-Allow-Origin") => vec![s!("dodgy.com")] }),
.. Response::default()
};
let hyper_response = pact_response_to_hyper_response(&response).unwrap();
expect!(hyper_response.headers().len()).to(be_equal_to(1));
expect!(hyper_response.headers().get("Access-Control-Allow-Origin")).to(be_some().value(HeaderValue::from_static("dodgy.com")));
} | identifier_body |
var_dir_ind_reg.rs | use std::collections::HashMap;
use pest::Error;
use var_instr::variable::{Variable, AsComplete, LabelNotFound};
use var_instr::variable::FromPair;
use machine::instruction::mem_size::MemSize;
use machine::instruction::parameter::{Direct, Indirect, Register, DirIndReg};
use label::Label;
#[derive(Debug)]
pub enum VarDirIndReg {
Direct(Variable<Direct>),
Indirect(Variable<Indirect>),
Register(Register),
}
impl MemSize for VarDirIndReg {
fn mem_size(&self) -> usize {
match *self {
VarDirIndReg::Direct(ref direct) => direct.mem_size(),
VarDirIndReg::Indirect(ref indirect) => indirect.mem_size(),
VarDirIndReg::Register(register) => register.mem_size(),
}
}
}
impl FromPair for VarDirIndReg {
fn from_pair(pair: ::AsmPair) -> Result<Self, ::AsmError> {
match pair.as_rule() {
::Rule::direct => Ok(VarDirIndReg::Direct(Variable::from_pair(pair)?)),
::Rule::indirect => Ok(VarDirIndReg::Indirect(Variable::from_pair(pair)?)),
::Rule::register => Ok(VarDirIndReg::Register(Register::from_pair(pair)?)),
_ => Err(Error::CustomErrorSpan {
message: format!("expected direct, indirect or register found {:?}", pair.as_rule()),
span: pair.clone().into_span(),
}),
}
}
}
impl AsComplete<DirIndReg> for VarDirIndReg {
fn | (&self, offset: usize, label_offsets: &HashMap<Label, usize>) -> Result<DirIndReg, LabelNotFound> {
use self::VarDirIndReg::*;
match *self {
Direct(ref direct) => Ok(DirIndReg::Direct(direct.as_complete(offset, label_offsets)?)),
Indirect(ref indirect) => Ok(DirIndReg::Indirect(indirect.as_complete(offset, label_offsets)?)),
Register(register) => Ok(DirIndReg::Register(register)),
}
}
}
| as_complete | identifier_name |
var_dir_ind_reg.rs | use std::collections::HashMap;
use pest::Error;
use var_instr::variable::{Variable, AsComplete, LabelNotFound};
use var_instr::variable::FromPair;
use machine::instruction::mem_size::MemSize;
use machine::instruction::parameter::{Direct, Indirect, Register, DirIndReg};
use label::Label;
#[derive(Debug)]
pub enum VarDirIndReg {
Direct(Variable<Direct>),
Indirect(Variable<Indirect>),
Register(Register),
}
| VarDirIndReg::Register(register) => register.mem_size(),
}
}
}
impl FromPair for VarDirIndReg {
fn from_pair(pair: ::AsmPair) -> Result<Self, ::AsmError> {
match pair.as_rule() {
::Rule::direct => Ok(VarDirIndReg::Direct(Variable::from_pair(pair)?)),
::Rule::indirect => Ok(VarDirIndReg::Indirect(Variable::from_pair(pair)?)),
::Rule::register => Ok(VarDirIndReg::Register(Register::from_pair(pair)?)),
_ => Err(Error::CustomErrorSpan {
message: format!("expected direct, indirect or register found {:?}", pair.as_rule()),
span: pair.clone().into_span(),
}),
}
}
}
impl AsComplete<DirIndReg> for VarDirIndReg {
fn as_complete(&self, offset: usize, label_offsets: &HashMap<Label, usize>) -> Result<DirIndReg, LabelNotFound> {
use self::VarDirIndReg::*;
match *self {
Direct(ref direct) => Ok(DirIndReg::Direct(direct.as_complete(offset, label_offsets)?)),
Indirect(ref indirect) => Ok(DirIndReg::Indirect(indirect.as_complete(offset, label_offsets)?)),
Register(register) => Ok(DirIndReg::Register(register)),
}
}
} | impl MemSize for VarDirIndReg {
fn mem_size(&self) -> usize {
match *self {
VarDirIndReg::Direct(ref direct) => direct.mem_size(),
VarDirIndReg::Indirect(ref indirect) => indirect.mem_size(), | random_line_split |
main.rs | extern crate clap;
#[macro_use]
extern crate structopt_derive;
extern crate structopt;
extern crate bytes;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate net2;
extern crate num_cpus;
extern crate resolve;
use std::net::SocketAddr;
use std::time::{self, Duration, SystemTime};
use net2::UdpBuilder;
use net2::unix::UnixUdpBuilderExt;
use resolve::resolver;
use std::thread;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use tokio_core::reactor::{Core, Interval, Timeout};
use tokio_core::net::{UdpSocket, UdpCodec, TcpStream};
use tokio_io::AsyncWrite;
use bytes::{Bytes, IntoBuf, Buf, BytesMut, BufMut};
use futures::{Stream, Sink, AsyncSink, Future};
use structopt::StructOpt;
pub static CHUNK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static ERR_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
struct DgramCodec {
addr: SocketAddr,
}
impl DgramCodec {
pub fn new(addr: SocketAddr) -> Self {
DgramCodec { addr: addr }
}
}
impl UdpCodec for DgramCodec {
type In = Bytes;
type Out = Bytes;
fn decode(&mut self,
_src: &SocketAddr,
buf: &[u8])
-> ::std::result::Result<Self::In, ::std::io::Error> {
Ok(buf.into())
}
fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr {
use std::io::Read;
msg.into_buf().reader().read_to_end(buf).unwrap();
self.addr
}
}
fn try_resolve(s: &str) -> SocketAddr {
s.parse()
.unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.expect("failed resolving backend name")
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
#[derive(StructOpt, Debug)]
#[structopt(about = "UDP multiplexor: copies packets from listened address to specified backends dropping packets on any error")]
struct Options {
#[structopt(short = "l", long = "listen", help = "Address and port to listen to")]
listen: SocketAddr,
#[structopt(short = "b", long = "backend", help = "IP and port of a backend to forward data to. Can be specified multiple times.", value_name="IP:PORT")]
backends: Vec<String>,
#[structopt(short = "n", long = "nthreads", help = "Number of worker threads, use 0 to use all CPU cores", default_value = "0")]
nthreads: usize,
#[structopt(short = "p", long = "pool", help = "Socket pool size", default_value = "4")]
snum: usize,
#[structopt(short = "g", long = "greens", help = "Number of green threads per worker hread", default_value = "4")]
greens: usize,
#[structopt(short = "s", long = "size", help = "Internal queue buffer size in packets", default_value = "1048576")]
bufsize: usize,
#[structopt(short = "t", long = "interval", help = "Stats printing/sending interval, in milliseconds", default_value = "1000")]
interval: Option<u64>, // u64 has a special meaning in structopt
#[structopt(short = "P", long = "stats-prefix", help = "Metric name prefix", default_value="")]
stats_prefix: String,
#[structopt(short = "S", long = "stats", help = "Graphite plaintext format compatible address:port to send metrics to")]
stats: Option<String>,
}
fn main() {
let mut opts = Options::from_args();
if opts.nthreads == 0 {
opts.nthreads = num_cpus::get();
}
if opts.snum == 0 {
panic!("Number of sockets cannot be zero")
}
if opts.greens == 0 {
panic!("Number of green threads cannot be zero")
}
// Init chunk counter
// In the main thread start a reporting timer
let mut core = Core::new().unwrap();
let handle = core.handle();
let timer = Interval::new(Duration::from_millis(opts.interval.unwrap()), &handle).unwrap();
let interval = opts.interval.unwrap() as f64 / 1000f64;
let stats = opts.stats.map(|ref stats| try_resolve(stats));
let prefix = opts.stats_prefix;
let timer = timer.for_each(|()|{
let chunks = CHUNK_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let drops = DROP_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let errors = ERR_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
println!("chunks/drops/errors: {:.2}/{:.2}/{:.2}", chunks, drops, errors);
match stats {
Some(addr) => {
let tcp_timeout = Timeout::new(Duration::from_millis(((interval / 2f64) * 1000f64).floor() as u64), &handle)
.unwrap()
.map_err(|_| ());
let prefix = prefix.clone();
let sender = TcpStream::connect(&addr, &handle).and_then(move |mut conn| {
let ts = SystemTime::now()
.duration_since(time::UNIX_EPOCH).unwrap();
let ts = ts.as_secs().to_string();
// prefix length can be various sizes, we need capacity for it,
// we also need space for other values that are around:
// 2 spaces, 7 chars for metric suffix
// around 26 chars for f64 value, but cannot be sure it takes more
// around 10 chars for timestamp
// so minimal value is ~45, we'll take 128 just in case float is very large
// for 3 metrics this must be tripled
let mut buf = BytesMut::with_capacity((prefix.len() + 128)*3);
buf.put(&prefix);
buf.put(".udpdup.chunks ");
buf.put(chunks.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.drops ");
buf.put(drops.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.errors ");
buf.put(errors.to_string());
buf.put(" ");
buf.put(&ts);
conn.write_buf(&mut buf.into_buf()).map(|_|())
}).map_err(|e| println!("Error sending stats: {:?}", e)).select(tcp_timeout).then(|_|Ok(()));
handle.spawn(sender);
}
None => (),
};
Ok(())
});
let backends = opts.backends
.iter()
.map(|b| try_resolve(b))
.collect::<Vec<SocketAddr>>();
// Create a pool of listener sockets
let mut sockets = Vec::new();
for _ in 0..opts.snum {
let socket = UdpBuilder::new_v4().unwrap();
socket.reuse_address(true).unwrap();
socket.reuse_port(true).unwrap();
let socket = socket.bind(&opts.listen).unwrap();
sockets.push(socket);
}
let bufsize = opts.bufsize;
let listen = opts.listen;
let greens = opts.greens;
for i in 0..opts.nthreads {
let backends = backends.clone();
// Each thread gets the clone of a socket pool
let sockets = sockets
.iter()
.map(|s| s.try_clone().unwrap())
.collect::<Vec<_>>();
thread::Builder::new()
.name(format!("udpdup_worker{}", i).into())
.spawn(move || {
// each thread runs it's own core
let mut core = Core::new().unwrap();
let handle = core.handle();
// Create sockets for each backend address
let senders = backends
.iter()
.map(|_| {
let socket = UdpBuilder::new_v4().unwrap();
let bind: SocketAddr = "0.0.0.0:0".parse().unwrap();
socket.bind(&bind).unwrap()
})
.collect::<Vec<_>>();
for _ in 0..greens {
for socket in sockets.iter() {
let senders = senders
.iter()
.map(|socket| socket.try_clone().expect("client socket cloning"))
.collect::<Vec<_>>();
// make senders into framed UDP
let mut backends = backends.clone();
let mut senders = senders
.into_iter()
.map(|socket| {
let sender = UdpSocket::from_socket(socket, &handle).unwrap();
// socket order becomes reversed but we don't care about this
// because the order is same everywhere
// and anyways, we push same data to every socket
let sender = sender
.framed(DgramCodec::new(backends.pop().unwrap()))
.buffer(bufsize);
sender
})
.collect::<Vec<_>>();
// create server now
let socket = socket.try_clone().expect("server socket cloning");
let socket = UdpSocket::from_socket(socket, &handle).unwrap();
let server = socket.framed(DgramCodec::new(listen)).buffer(bufsize);
let server = server.for_each(move |buf| {
CHUNK_COUNTER.fetch_add(1, Ordering::Relaxed);
senders
.iter_mut()
.map(|sender| {
let res = sender.start_send(buf.clone());
match res {
Err(_) => {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::NotReady(_)) => {
DROP_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::Ready) => {
let res = sender.poll_complete();
if res.is_err() |
}
}
})
.last();
Ok(())
});
handle.spawn(server.map_err(|_| ()));
}
}
core.run(::futures::future::empty::<(), ()>()).unwrap();
})
.unwrap();
}
drop(sockets);
core.run(timer).unwrap();
}
| {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
} | conditional_block |
main.rs | extern crate clap;
#[macro_use]
extern crate structopt_derive;
extern crate structopt;
extern crate bytes;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate net2;
extern crate num_cpus;
extern crate resolve;
use std::net::SocketAddr;
use std::time::{self, Duration, SystemTime};
use net2::UdpBuilder;
use net2::unix::UnixUdpBuilderExt;
use resolve::resolver;
use std::thread;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use tokio_core::reactor::{Core, Interval, Timeout};
use tokio_core::net::{UdpSocket, UdpCodec, TcpStream};
use tokio_io::AsyncWrite;
use bytes::{Bytes, IntoBuf, Buf, BytesMut, BufMut};
use futures::{Stream, Sink, AsyncSink, Future};
use structopt::StructOpt;
pub static CHUNK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static ERR_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
struct DgramCodec {
addr: SocketAddr,
}
impl DgramCodec {
pub fn new(addr: SocketAddr) -> Self {
DgramCodec { addr: addr }
}
}
impl UdpCodec for DgramCodec {
type In = Bytes;
type Out = Bytes;
fn decode(&mut self,
_src: &SocketAddr,
buf: &[u8])
-> ::std::result::Result<Self::In, ::std::io::Error> {
Ok(buf.into())
}
fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr {
use std::io::Read;
msg.into_buf().reader().read_to_end(buf).unwrap();
self.addr
}
}
fn try_resolve(s: &str) -> SocketAddr {
s.parse()
.unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.expect("failed resolving backend name")
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
#[derive(StructOpt, Debug)]
#[structopt(about = "UDP multiplexor: copies packets from listened address to specified backends dropping packets on any error")]
struct Options {
#[structopt(short = "l", long = "listen", help = "Address and port to listen to")]
listen: SocketAddr,
#[structopt(short = "b", long = "backend", help = "IP and port of a backend to forward data to. Can be specified multiple times.", value_name="IP:PORT")]
backends: Vec<String>,
#[structopt(short = "n", long = "nthreads", help = "Number of worker threads, use 0 to use all CPU cores", default_value = "0")]
nthreads: usize,
#[structopt(short = "p", long = "pool", help = "Socket pool size", default_value = "4")]
snum: usize,
#[structopt(short = "g", long = "greens", help = "Number of green threads per worker hread", default_value = "4")]
greens: usize,
#[structopt(short = "s", long = "size", help = "Internal queue buffer size in packets", default_value = "1048576")]
bufsize: usize,
#[structopt(short = "t", long = "interval", help = "Stats printing/sending interval, in milliseconds", default_value = "1000")]
interval: Option<u64>, // u64 has a special meaning in structopt
#[structopt(short = "P", long = "stats-prefix", help = "Metric name prefix", default_value="")] | stats_prefix: String,
#[structopt(short = "S", long = "stats", help = "Graphite plaintext format compatible address:port to send metrics to")]
stats: Option<String>,
}
fn main() {
let mut opts = Options::from_args();
if opts.nthreads == 0 {
opts.nthreads = num_cpus::get();
}
if opts.snum == 0 {
panic!("Number of sockets cannot be zero")
}
if opts.greens == 0 {
panic!("Number of green threads cannot be zero")
}
// Init chunk counter
// In the main thread start a reporting timer
let mut core = Core::new().unwrap();
let handle = core.handle();
let timer = Interval::new(Duration::from_millis(opts.interval.unwrap()), &handle).unwrap();
let interval = opts.interval.unwrap() as f64 / 1000f64;
let stats = opts.stats.map(|ref stats| try_resolve(stats));
let prefix = opts.stats_prefix;
let timer = timer.for_each(|()|{
let chunks = CHUNK_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let drops = DROP_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let errors = ERR_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
println!("chunks/drops/errors: {:.2}/{:.2}/{:.2}", chunks, drops, errors);
match stats {
Some(addr) => {
let tcp_timeout = Timeout::new(Duration::from_millis(((interval / 2f64) * 1000f64).floor() as u64), &handle)
.unwrap()
.map_err(|_| ());
let prefix = prefix.clone();
let sender = TcpStream::connect(&addr, &handle).and_then(move |mut conn| {
let ts = SystemTime::now()
.duration_since(time::UNIX_EPOCH).unwrap();
let ts = ts.as_secs().to_string();
// prefix length can be various sizes, we need capacity for it,
// we also need space for other values that are around:
// 2 spaces, 7 chars for metric suffix
// around 26 chars for f64 value, but cannot be sure it takes more
// around 10 chars for timestamp
// so minimal value is ~45, we'll take 128 just in case float is very large
// for 3 metrics this must be tripled
let mut buf = BytesMut::with_capacity((prefix.len() + 128)*3);
buf.put(&prefix);
buf.put(".udpdup.chunks ");
buf.put(chunks.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.drops ");
buf.put(drops.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.errors ");
buf.put(errors.to_string());
buf.put(" ");
buf.put(&ts);
conn.write_buf(&mut buf.into_buf()).map(|_|())
}).map_err(|e| println!("Error sending stats: {:?}", e)).select(tcp_timeout).then(|_|Ok(()));
handle.spawn(sender);
}
None => (),
};
Ok(())
});
let backends = opts.backends
.iter()
.map(|b| try_resolve(b))
.collect::<Vec<SocketAddr>>();
// Create a pool of listener sockets
let mut sockets = Vec::new();
for _ in 0..opts.snum {
let socket = UdpBuilder::new_v4().unwrap();
socket.reuse_address(true).unwrap();
socket.reuse_port(true).unwrap();
let socket = socket.bind(&opts.listen).unwrap();
sockets.push(socket);
}
let bufsize = opts.bufsize;
let listen = opts.listen;
let greens = opts.greens;
for i in 0..opts.nthreads {
let backends = backends.clone();
// Each thread gets the clone of a socket pool
let sockets = sockets
.iter()
.map(|s| s.try_clone().unwrap())
.collect::<Vec<_>>();
thread::Builder::new()
.name(format!("udpdup_worker{}", i).into())
.spawn(move || {
// each thread runs it's own core
let mut core = Core::new().unwrap();
let handle = core.handle();
// Create sockets for each backend address
let senders = backends
.iter()
.map(|_| {
let socket = UdpBuilder::new_v4().unwrap();
let bind: SocketAddr = "0.0.0.0:0".parse().unwrap();
socket.bind(&bind).unwrap()
})
.collect::<Vec<_>>();
for _ in 0..greens {
for socket in sockets.iter() {
let senders = senders
.iter()
.map(|socket| socket.try_clone().expect("client socket cloning"))
.collect::<Vec<_>>();
// make senders into framed UDP
let mut backends = backends.clone();
let mut senders = senders
.into_iter()
.map(|socket| {
let sender = UdpSocket::from_socket(socket, &handle).unwrap();
// socket order becomes reversed but we don't care about this
// because the order is same everywhere
// and anyways, we push same data to every socket
let sender = sender
.framed(DgramCodec::new(backends.pop().unwrap()))
.buffer(bufsize);
sender
})
.collect::<Vec<_>>();
// create server now
let socket = socket.try_clone().expect("server socket cloning");
let socket = UdpSocket::from_socket(socket, &handle).unwrap();
let server = socket.framed(DgramCodec::new(listen)).buffer(bufsize);
let server = server.for_each(move |buf| {
CHUNK_COUNTER.fetch_add(1, Ordering::Relaxed);
senders
.iter_mut()
.map(|sender| {
let res = sender.start_send(buf.clone());
match res {
Err(_) => {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::NotReady(_)) => {
DROP_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::Ready) => {
let res = sender.poll_complete();
if res.is_err() {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
}
}
}
})
.last();
Ok(())
});
handle.spawn(server.map_err(|_| ()));
}
}
core.run(::futures::future::empty::<(), ()>()).unwrap();
})
.unwrap();
}
drop(sockets);
core.run(timer).unwrap();
} | random_line_split |
|
main.rs | extern crate clap;
#[macro_use]
extern crate structopt_derive;
extern crate structopt;
extern crate bytes;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate net2;
extern crate num_cpus;
extern crate resolve;
use std::net::SocketAddr;
use std::time::{self, Duration, SystemTime};
use net2::UdpBuilder;
use net2::unix::UnixUdpBuilderExt;
use resolve::resolver;
use std::thread;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use tokio_core::reactor::{Core, Interval, Timeout};
use tokio_core::net::{UdpSocket, UdpCodec, TcpStream};
use tokio_io::AsyncWrite;
use bytes::{Bytes, IntoBuf, Buf, BytesMut, BufMut};
use futures::{Stream, Sink, AsyncSink, Future};
use structopt::StructOpt;
pub static CHUNK_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub static ERR_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
struct | {
addr: SocketAddr,
}
impl DgramCodec {
pub fn new(addr: SocketAddr) -> Self {
DgramCodec { addr: addr }
}
}
impl UdpCodec for DgramCodec {
type In = Bytes;
type Out = Bytes;
fn decode(&mut self,
_src: &SocketAddr,
buf: &[u8])
-> ::std::result::Result<Self::In, ::std::io::Error> {
Ok(buf.into())
}
fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr {
use std::io::Read;
msg.into_buf().reader().read_to_end(buf).unwrap();
self.addr
}
}
fn try_resolve(s: &str) -> SocketAddr {
s.parse()
.unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.expect("failed resolving backend name")
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
#[derive(StructOpt, Debug)]
#[structopt(about = "UDP multiplexor: copies packets from listened address to specified backends dropping packets on any error")]
struct Options {
#[structopt(short = "l", long = "listen", help = "Address and port to listen to")]
listen: SocketAddr,
#[structopt(short = "b", long = "backend", help = "IP and port of a backend to forward data to. Can be specified multiple times.", value_name="IP:PORT")]
backends: Vec<String>,
#[structopt(short = "n", long = "nthreads", help = "Number of worker threads, use 0 to use all CPU cores", default_value = "0")]
nthreads: usize,
#[structopt(short = "p", long = "pool", help = "Socket pool size", default_value = "4")]
snum: usize,
#[structopt(short = "g", long = "greens", help = "Number of green threads per worker hread", default_value = "4")]
greens: usize,
#[structopt(short = "s", long = "size", help = "Internal queue buffer size in packets", default_value = "1048576")]
bufsize: usize,
#[structopt(short = "t", long = "interval", help = "Stats printing/sending interval, in milliseconds", default_value = "1000")]
interval: Option<u64>, // u64 has a special meaning in structopt
#[structopt(short = "P", long = "stats-prefix", help = "Metric name prefix", default_value="")]
stats_prefix: String,
#[structopt(short = "S", long = "stats", help = "Graphite plaintext format compatible address:port to send metrics to")]
stats: Option<String>,
}
fn main() {
let mut opts = Options::from_args();
if opts.nthreads == 0 {
opts.nthreads = num_cpus::get();
}
if opts.snum == 0 {
panic!("Number of sockets cannot be zero")
}
if opts.greens == 0 {
panic!("Number of green threads cannot be zero")
}
// Init chunk counter
// In the main thread start a reporting timer
let mut core = Core::new().unwrap();
let handle = core.handle();
let timer = Interval::new(Duration::from_millis(opts.interval.unwrap()), &handle).unwrap();
let interval = opts.interval.unwrap() as f64 / 1000f64;
let stats = opts.stats.map(|ref stats| try_resolve(stats));
let prefix = opts.stats_prefix;
let timer = timer.for_each(|()|{
let chunks = CHUNK_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let drops = DROP_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
let errors = ERR_COUNTER.swap(0, Ordering::Relaxed) as f64 / interval;
println!("chunks/drops/errors: {:.2}/{:.2}/{:.2}", chunks, drops, errors);
match stats {
Some(addr) => {
let tcp_timeout = Timeout::new(Duration::from_millis(((interval / 2f64) * 1000f64).floor() as u64), &handle)
.unwrap()
.map_err(|_| ());
let prefix = prefix.clone();
let sender = TcpStream::connect(&addr, &handle).and_then(move |mut conn| {
let ts = SystemTime::now()
.duration_since(time::UNIX_EPOCH).unwrap();
let ts = ts.as_secs().to_string();
// prefix length can be various sizes, we need capacity for it,
// we also need space for other values that are around:
// 2 spaces, 7 chars for metric suffix
// around 26 chars for f64 value, but cannot be sure it takes more
// around 10 chars for timestamp
// so minimal value is ~45, we'll take 128 just in case float is very large
// for 3 metrics this must be tripled
let mut buf = BytesMut::with_capacity((prefix.len() + 128)*3);
buf.put(&prefix);
buf.put(".udpdup.chunks ");
buf.put(chunks.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.drops ");
buf.put(drops.to_string());
buf.put(" ");
buf.put(&ts);
buf.put("\n");
buf.put(&prefix);
buf.put(".udpdup.errors ");
buf.put(errors.to_string());
buf.put(" ");
buf.put(&ts);
conn.write_buf(&mut buf.into_buf()).map(|_|())
}).map_err(|e| println!("Error sending stats: {:?}", e)).select(tcp_timeout).then(|_|Ok(()));
handle.spawn(sender);
}
None => (),
};
Ok(())
});
let backends = opts.backends
.iter()
.map(|b| try_resolve(b))
.collect::<Vec<SocketAddr>>();
// Create a pool of listener sockets
let mut sockets = Vec::new();
for _ in 0..opts.snum {
let socket = UdpBuilder::new_v4().unwrap();
socket.reuse_address(true).unwrap();
socket.reuse_port(true).unwrap();
let socket = socket.bind(&opts.listen).unwrap();
sockets.push(socket);
}
let bufsize = opts.bufsize;
let listen = opts.listen;
let greens = opts.greens;
for i in 0..opts.nthreads {
let backends = backends.clone();
// Each thread gets the clone of a socket pool
let sockets = sockets
.iter()
.map(|s| s.try_clone().unwrap())
.collect::<Vec<_>>();
thread::Builder::new()
.name(format!("udpdup_worker{}", i).into())
.spawn(move || {
// each thread runs it's own core
let mut core = Core::new().unwrap();
let handle = core.handle();
// Create sockets for each backend address
let senders = backends
.iter()
.map(|_| {
let socket = UdpBuilder::new_v4().unwrap();
let bind: SocketAddr = "0.0.0.0:0".parse().unwrap();
socket.bind(&bind).unwrap()
})
.collect::<Vec<_>>();
for _ in 0..greens {
for socket in sockets.iter() {
let senders = senders
.iter()
.map(|socket| socket.try_clone().expect("client socket cloning"))
.collect::<Vec<_>>();
// make senders into framed UDP
let mut backends = backends.clone();
let mut senders = senders
.into_iter()
.map(|socket| {
let sender = UdpSocket::from_socket(socket, &handle).unwrap();
// socket order becomes reversed but we don't care about this
// because the order is same everywhere
// and anyways, we push same data to every socket
let sender = sender
.framed(DgramCodec::new(backends.pop().unwrap()))
.buffer(bufsize);
sender
})
.collect::<Vec<_>>();
// create server now
let socket = socket.try_clone().expect("server socket cloning");
let socket = UdpSocket::from_socket(socket, &handle).unwrap();
let server = socket.framed(DgramCodec::new(listen)).buffer(bufsize);
let server = server.for_each(move |buf| {
CHUNK_COUNTER.fetch_add(1, Ordering::Relaxed);
senders
.iter_mut()
.map(|sender| {
let res = sender.start_send(buf.clone());
match res {
Err(_) => {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::NotReady(_)) => {
DROP_COUNTER.fetch_add(1, Ordering::Relaxed);
}
Ok(AsyncSink::Ready) => {
let res = sender.poll_complete();
if res.is_err() {
ERR_COUNTER.fetch_add(1, Ordering::Relaxed);
}
}
}
})
.last();
Ok(())
});
handle.spawn(server.map_err(|_| ()));
}
}
core.run(::futures::future::empty::<(), ()>()).unwrap();
})
.unwrap();
}
drop(sockets);
core.run(timer).unwrap();
}
| DgramCodec | identifier_name |
issue-33174-restricted-type-in-public-interface.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)] // genus is always capitalized
pub(crate) struct Snail;
//~^ NOTE `Snail` declared as crate-visible
mod sea {
pub(super) struct Turtle;
//~^ NOTE `sea::Turtle` declared as restricted
}
struct | ;
//~^ NOTE `Tortoise` declared as private
pub struct Shell<T> {
pub(crate) creature: T,
}
pub type Helix_pomatia = Shell<Snail>;
//~^ ERROR crate-visible type `Snail` in public interface
//~| NOTE can't leak crate-visible type
pub type Dermochelys_coriacea = Shell<sea::Turtle>;
//~^ ERROR restricted type `sea::Turtle` in public interface
//~| NOTE can't leak restricted type
pub type Testudo_graeca = Shell<Tortoise>;
//~^ ERROR private type `Tortoise` in public interface
//~| NOTE can't leak private type
fn main() {}
| Tortoise | identifier_name |
issue-33174-restricted-type-in-public-interface.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)] // genus is always capitalized
pub(crate) struct Snail;
//~^ NOTE `Snail` declared as crate-visible
mod sea {
pub(super) struct Turtle;
//~^ NOTE `sea::Turtle` declared as restricted
}
struct Tortoise;
//~^ NOTE `Tortoise` declared as private
pub struct Shell<T> {
pub(crate) creature: T,
}
pub type Helix_pomatia = Shell<Snail>;
//~^ ERROR crate-visible type `Snail` in public interface
//~| NOTE can't leak crate-visible type
pub type Dermochelys_coriacea = Shell<sea::Turtle>;
//~^ ERROR restricted type `sea::Turtle` in public interface
//~| NOTE can't leak restricted type
pub type Testudo_graeca = Shell<Tortoise>;
//~^ ERROR private type `Tortoise` in public interface
//~| NOTE can't leak private type
fn main() {} | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | random_line_split |
euler11.rs | #[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(zero_prefixed_literal)]
const GRID: [[u32; 20]; 20] = [
[08,02,22,97,38,15,00,40,00,75,04,05,07,78,52,12,50,77,91,08],
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,04,56,62,00],
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,03,49,13,36,65],
[52,70,95,23,04,60,11,42,69,24,68,56,01,32,56,71,37,02,36,91],
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
[24,47,32,60,99,03,45,02,44,75,33,53,78,36,84,20,35,17,12,50],
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
[67,26,20,68,02,62,12,20,95,63,94,39,63,08,40,91,66,49,94,21],
[24,55,58,05,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
[21,36,23,09,75,00,76,44,20,45,35,14,00,61,33,97,34,31,33,95],
[78,17,53,28,22,75,31,67,15,94,03,80,04,62,16,14,09,53,56,92],
[16,39,05,42,96,35,31,47,55,58,88,24,00,17,54,24,36,29,85,57],
[86,56,00,48,35,71,89,07,05,44,44,37,44,60,21,58,51,54,17,58],
[19,80,81,68,05,94,47,69,28,73,92,13,86,52,17,77,04,89,55,40],
[04,52,08,83,97,35,99,16,07,97,57,32,16,26,26,79,33,27,98,66],
[88,36,68,87,57,62,20,72,03,46,33,67,46,55,12,32,63,93,53,69],
[04,42,16,73,38,25,39,11,24,94,72,18,08,46,29,32,40,62,76,36],
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,04,36,16],
[20,73,35,29,78,31,90,01,74,31,49,71,48,86,81,16,23,57,05,54],
[01,70,54,71,83,51,54,69,16,92,33,48,61,43,52,01,89,19,67,48]
];
| let mut max = 0;
for i in 0..20 {
for j in 0..20 {
if j < 17 {
let p = product(&GRID[i][j..j + 4]);
if p > max {
max = p
}
}
if i < 17 {
let p = product(&[GRID[i][j], GRID[i + 1][j], GRID[i + 2][j], GRID[i + 3][j]]);
if p > max {
max = p
}
}
if i < 17 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i + 1][j + 1],
GRID[i + 2][j + 2],
GRID[i + 3][j + 3]]);
if p > max {
max = p
}
}
if i > 2 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i - 1][j + 1],
GRID[i - 2][j + 2],
GRID[i - 3][j + 3]]);
if p > max {
max = p
}
}
}
}
println!("{}", max);
} | pub fn main() {
let product = |slice: &[u32]| slice.iter().fold(1, |prod, i| prod * i); | random_line_split |
euler11.rs | #[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(zero_prefixed_literal)]
const GRID: [[u32; 20]; 20] = [
[08,02,22,97,38,15,00,40,00,75,04,05,07,78,52,12,50,77,91,08],
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,04,56,62,00],
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,03,49,13,36,65],
[52,70,95,23,04,60,11,42,69,24,68,56,01,32,56,71,37,02,36,91],
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
[24,47,32,60,99,03,45,02,44,75,33,53,78,36,84,20,35,17,12,50],
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
[67,26,20,68,02,62,12,20,95,63,94,39,63,08,40,91,66,49,94,21],
[24,55,58,05,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
[21,36,23,09,75,00,76,44,20,45,35,14,00,61,33,97,34,31,33,95],
[78,17,53,28,22,75,31,67,15,94,03,80,04,62,16,14,09,53,56,92],
[16,39,05,42,96,35,31,47,55,58,88,24,00,17,54,24,36,29,85,57],
[86,56,00,48,35,71,89,07,05,44,44,37,44,60,21,58,51,54,17,58],
[19,80,81,68,05,94,47,69,28,73,92,13,86,52,17,77,04,89,55,40],
[04,52,08,83,97,35,99,16,07,97,57,32,16,26,26,79,33,27,98,66],
[88,36,68,87,57,62,20,72,03,46,33,67,46,55,12,32,63,93,53,69],
[04,42,16,73,38,25,39,11,24,94,72,18,08,46,29,32,40,62,76,36],
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,04,36,16],
[20,73,35,29,78,31,90,01,74,31,49,71,48,86,81,16,23,57,05,54],
[01,70,54,71,83,51,54,69,16,92,33,48,61,43,52,01,89,19,67,48]
];
pub fn main() | GRID[i + 2][j + 2],
GRID[i + 3][j + 3]]);
if p > max {
max = p
}
}
if i > 2 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i - 1][j + 1],
GRID[i - 2][j + 2],
GRID[i - 3][j + 3]]);
if p > max {
max = p
}
}
}
}
println!("{}", max);
}
| {
let product = |slice: &[u32]| slice.iter().fold(1, |prod, i| prod * i);
let mut max = 0;
for i in 0..20 {
for j in 0..20 {
if j < 17 {
let p = product(&GRID[i][j..j + 4]);
if p > max {
max = p
}
}
if i < 17 {
let p = product(&[GRID[i][j], GRID[i + 1][j], GRID[i + 2][j], GRID[i + 3][j]]);
if p > max {
max = p
}
}
if i < 17 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i + 1][j + 1], | identifier_body |
euler11.rs | #[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(zero_prefixed_literal)]
const GRID: [[u32; 20]; 20] = [
[08,02,22,97,38,15,00,40,00,75,04,05,07,78,52,12,50,77,91,08],
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,04,56,62,00],
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,03,49,13,36,65],
[52,70,95,23,04,60,11,42,69,24,68,56,01,32,56,71,37,02,36,91],
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
[24,47,32,60,99,03,45,02,44,75,33,53,78,36,84,20,35,17,12,50],
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
[67,26,20,68,02,62,12,20,95,63,94,39,63,08,40,91,66,49,94,21],
[24,55,58,05,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
[21,36,23,09,75,00,76,44,20,45,35,14,00,61,33,97,34,31,33,95],
[78,17,53,28,22,75,31,67,15,94,03,80,04,62,16,14,09,53,56,92],
[16,39,05,42,96,35,31,47,55,58,88,24,00,17,54,24,36,29,85,57],
[86,56,00,48,35,71,89,07,05,44,44,37,44,60,21,58,51,54,17,58],
[19,80,81,68,05,94,47,69,28,73,92,13,86,52,17,77,04,89,55,40],
[04,52,08,83,97,35,99,16,07,97,57,32,16,26,26,79,33,27,98,66],
[88,36,68,87,57,62,20,72,03,46,33,67,46,55,12,32,63,93,53,69],
[04,42,16,73,38,25,39,11,24,94,72,18,08,46,29,32,40,62,76,36],
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,04,36,16],
[20,73,35,29,78,31,90,01,74,31,49,71,48,86,81,16,23,57,05,54],
[01,70,54,71,83,51,54,69,16,92,33,48,61,43,52,01,89,19,67,48]
];
pub fn | () {
let product = |slice: &[u32]| slice.iter().fold(1, |prod, i| prod * i);
let mut max = 0;
for i in 0..20 {
for j in 0..20 {
if j < 17 {
let p = product(&GRID[i][j..j + 4]);
if p > max {
max = p
}
}
if i < 17 {
let p = product(&[GRID[i][j], GRID[i + 1][j], GRID[i + 2][j], GRID[i + 3][j]]);
if p > max {
max = p
}
}
if i < 17 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i + 1][j + 1],
GRID[i + 2][j + 2],
GRID[i + 3][j + 3]]);
if p > max {
max = p
}
}
if i > 2 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i - 1][j + 1],
GRID[i - 2][j + 2],
GRID[i - 3][j + 3]]);
if p > max {
max = p
}
}
}
}
println!("{}", max);
}
| main | identifier_name |
euler11.rs | #[cfg_attr(rustfmt, rustfmt_skip)]
#[allow(zero_prefixed_literal)]
const GRID: [[u32; 20]; 20] = [
[08,02,22,97,38,15,00,40,00,75,04,05,07,78,52,12,50,77,91,08],
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,04,56,62,00],
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,03,49,13,36,65],
[52,70,95,23,04,60,11,42,69,24,68,56,01,32,56,71,37,02,36,91],
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
[24,47,32,60,99,03,45,02,44,75,33,53,78,36,84,20,35,17,12,50],
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
[67,26,20,68,02,62,12,20,95,63,94,39,63,08,40,91,66,49,94,21],
[24,55,58,05,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
[21,36,23,09,75,00,76,44,20,45,35,14,00,61,33,97,34,31,33,95],
[78,17,53,28,22,75,31,67,15,94,03,80,04,62,16,14,09,53,56,92],
[16,39,05,42,96,35,31,47,55,58,88,24,00,17,54,24,36,29,85,57],
[86,56,00,48,35,71,89,07,05,44,44,37,44,60,21,58,51,54,17,58],
[19,80,81,68,05,94,47,69,28,73,92,13,86,52,17,77,04,89,55,40],
[04,52,08,83,97,35,99,16,07,97,57,32,16,26,26,79,33,27,98,66],
[88,36,68,87,57,62,20,72,03,46,33,67,46,55,12,32,63,93,53,69],
[04,42,16,73,38,25,39,11,24,94,72,18,08,46,29,32,40,62,76,36],
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,04,36,16],
[20,73,35,29,78,31,90,01,74,31,49,71,48,86,81,16,23,57,05,54],
[01,70,54,71,83,51,54,69,16,92,33,48,61,43,52,01,89,19,67,48]
];
pub fn main() {
let product = |slice: &[u32]| slice.iter().fold(1, |prod, i| prod * i);
let mut max = 0;
for i in 0..20 {
for j in 0..20 {
if j < 17 |
if i < 17 {
let p = product(&[GRID[i][j], GRID[i + 1][j], GRID[i + 2][j], GRID[i + 3][j]]);
if p > max {
max = p
}
}
if i < 17 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i + 1][j + 1],
GRID[i + 2][j + 2],
GRID[i + 3][j + 3]]);
if p > max {
max = p
}
}
if i > 2 && j < 17 {
let p = product(&[GRID[i][j],
GRID[i - 1][j + 1],
GRID[i - 2][j + 2],
GRID[i - 3][j + 3]]);
if p > max {
max = p
}
}
}
}
println!("{}", max);
}
| {
let p = product(&GRID[i][j..j + 4]);
if p > max {
max = p
}
} | conditional_block |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use display_list_builder::DisplayListBuildState;
use flow::{FlowFlags, Flow, GetBaseFlow, ImmutableFlowUtils};
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use servo_config::opts;
use style::context::{SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{NodeInfo, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::ServoRestyleDamage;
use style::traversal::{DomTraversal, recalc_style_at};
use style::traversal::PerLevelTraversalData;
use wrapper::{GetRawData, LayoutNodeLayoutData};
use wrapper::ThreadSafeLayoutNodeHelpers;
pub struct RecalcStyleAndConstructFlows<'a> {
context: LayoutContext<'a>,
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
pub fn layout_context(&self) -> &LayoutContext<'a> {
&self.context
}
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
/// Creates a traversal context, taking ownership of the shared layout context.
pub fn new(context: LayoutContext<'a>) -> Self {
RecalcStyleAndConstructFlows {
context: context,
}
}
/// Consumes this traversal context, returning ownership of the shared layout
/// context to the caller.
pub fn destroy(self) -> LayoutContext<'a> {
self.context
}
}
#[allow(unsafe_code)]
impl<'a, E> DomTraversal<E> for RecalcStyleAndConstructFlows<'a>
where E: TElement,
E::ConcreteNode: LayoutNode,
E::FontMetricsProvider: Send,
{
fn process_preorder<F>(&self, traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>, node: E::ConcreteNode,
note_child: F)
where F: FnMut(E::ConcreteNode)
{
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
unsafe { node.initialize_data() };
if!node.is_text_node() {
let el = node.as_element().unwrap();
let mut data = el.mutate_data().unwrap();
recalc_style_at(self, traversal_data, context, el, &mut data, note_child);
}
}
fn process_postorder(&self, _style_context: &mut StyleContext<E>, node: E::ConcreteNode) {
construct_flows_at(&self.context, node);
}
fn text_node_needs_traversal(node: E::ConcreteNode, parent_data: &ElementData) -> bool {
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
node.get_raw_data().is_none() ||!parent_data.damage.is_empty()
}
fn shared_context(&self) -> &SharedStyleContext {
&self.context.style_context
}
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&self, _flow: &mut Flow) -> bool {
true
}
/// Returns true if this node must be processed in-order. If this returns false,
/// we skip the operation for this node, but continue processing the descendants.
/// This is called *after* parent nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in preorder.
fn traverse(&self, flow: &mut Flow) {
if!self.should_process_subtree(flow) {
return;
}
if self.should_process(flow) {
self.process(flow);
}
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
}
/// Traverse the Absolute flow tree in preorder.
///
/// Traverse all your direct absolute descendants, who will then traverse
/// their direct absolute descendants.
///
/// Return true if the traversal is to continue or false to stop.
fn traverse_absolute_flows(&self, flow: &mut Flow) {
if self.should_process(flow) {
self.process(flow);
}
for descendant_link in flow.mut_base().abs_descendants.iter() {
self.traverse_absolute_flows(descendant_link)
}
}
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns false if this node must be processed in-order. If this returns false, we skip the
/// operation for this node, but continue processing the ancestors. This is called *after*
/// child nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in postorder.
fn traverse(&self, flow: &mut Flow) {
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
if self.should_process(flow) {
self.process(flow);
}
}
}
/// An in-order (sequential only) traversal.
pub trait InorderFlowTraversal {
/// The operation to perform. Returns the level of the tree we're at.
fn process(&mut self, flow: &mut Flow, level: u32);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&mut self, _flow: &mut Flow) -> bool |
/// Traverses the tree in-order.
fn traverse(&mut self, flow: &mut Flow, level: u32) {
if!self.should_process_subtree(flow) {
return;
}
self.process(flow, level);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid, level + 1);
}
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<N>(context: &LayoutContext, node: N)
where N: LayoutNode,
{
debug!("construct_flows_at: {:?}", node);
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:?}: {:x}",
tnode,
tnode.flow_debug_id());
}
}
tnode.mutate_layout_data().unwrap().flags.insert(::data::LayoutDataFlags::HAS_BEEN_TRAVERSED);
}
if let Some(el) = node.as_element() {
unsafe { el.unset_dirty_descendants(); }
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow.base();
base.restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(FlowFlags::CAN_BE_FRAGMENTED)
}
}
pub struct ComputeStackingRelativePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeStackingRelativePositions<'a> {
#[inline]
fn should_process_subtree(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::REPOSITION)
}
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_stacking_relative_position(self.layout_context);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPOSITION)
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let parent_stacking_context_id = self.state.current_stacking_context_id;
self.state.current_stacking_context_id = flow.base().stacking_context_id;
let parent_clipping_and_scrolling = self.state.current_clipping_and_scrolling;
self.state.current_clipping_and_scrolling = flow.clipping_and_scrolling();
flow.build_display_list(&mut self.state);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPAINT);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
self.state.current_stacking_context_id = parent_stacking_context_id;
self.state.current_clipping_and_scrolling = parent_clipping_and_scrolling;
}
}
| {
true
} | identifier_body |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use display_list_builder::DisplayListBuildState;
use flow::{FlowFlags, Flow, GetBaseFlow, ImmutableFlowUtils};
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use servo_config::opts;
use style::context::{SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{NodeInfo, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::ServoRestyleDamage;
use style::traversal::{DomTraversal, recalc_style_at};
use style::traversal::PerLevelTraversalData;
use wrapper::{GetRawData, LayoutNodeLayoutData};
use wrapper::ThreadSafeLayoutNodeHelpers;
pub struct RecalcStyleAndConstructFlows<'a> {
context: LayoutContext<'a>,
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
pub fn layout_context(&self) -> &LayoutContext<'a> {
&self.context
}
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
/// Creates a traversal context, taking ownership of the shared layout context.
pub fn new(context: LayoutContext<'a>) -> Self {
RecalcStyleAndConstructFlows {
context: context,
}
}
/// Consumes this traversal context, returning ownership of the shared layout
/// context to the caller.
pub fn destroy(self) -> LayoutContext<'a> {
self.context
}
}
#[allow(unsafe_code)]
impl<'a, E> DomTraversal<E> for RecalcStyleAndConstructFlows<'a>
where E: TElement,
E::ConcreteNode: LayoutNode,
E::FontMetricsProvider: Send,
{
fn process_preorder<F>(&self, traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>, node: E::ConcreteNode,
note_child: F)
where F: FnMut(E::ConcreteNode)
{
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
unsafe { node.initialize_data() };
if!node.is_text_node() {
let el = node.as_element().unwrap();
let mut data = el.mutate_data().unwrap();
recalc_style_at(self, traversal_data, context, el, &mut data, note_child);
}
}
fn process_postorder(&self, _style_context: &mut StyleContext<E>, node: E::ConcreteNode) {
construct_flows_at(&self.context, node);
}
fn text_node_needs_traversal(node: E::ConcreteNode, parent_data: &ElementData) -> bool {
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
node.get_raw_data().is_none() ||!parent_data.damage.is_empty()
}
fn shared_context(&self) -> &SharedStyleContext {
&self.context.style_context
}
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&self, _flow: &mut Flow) -> bool {
true
}
/// Returns true if this node must be processed in-order. If this returns false,
/// we skip the operation for this node, but continue processing the descendants.
/// This is called *after* parent nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in preorder.
fn traverse(&self, flow: &mut Flow) {
if!self.should_process_subtree(flow) {
return;
}
if self.should_process(flow) {
self.process(flow);
}
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
}
/// Traverse the Absolute flow tree in preorder.
///
/// Traverse all your direct absolute descendants, who will then traverse
/// their direct absolute descendants.
///
/// Return true if the traversal is to continue or false to stop.
fn traverse_absolute_flows(&self, flow: &mut Flow) {
if self.should_process(flow) {
self.process(flow);
}
for descendant_link in flow.mut_base().abs_descendants.iter() {
self.traverse_absolute_flows(descendant_link)
}
}
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns false if this node must be processed in-order. If this returns false, we skip the
/// operation for this node, but continue processing the ancestors. This is called *after*
/// child nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in postorder.
fn traverse(&self, flow: &mut Flow) {
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
if self.should_process(flow) {
self.process(flow);
}
}
}
/// An in-order (sequential only) traversal.
pub trait InorderFlowTraversal {
/// The operation to perform. Returns the level of the tree we're at.
fn process(&mut self, flow: &mut Flow, level: u32);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&mut self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in-order.
fn traverse(&mut self, flow: &mut Flow, level: u32) {
if!self.should_process_subtree(flow) {
return;
}
self.process(flow, level);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid, level + 1);
}
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<N>(context: &LayoutContext, node: N)
where N: LayoutNode,
{
debug!("construct_flows_at: {:?}", node);
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:?}: {:x}",
tnode,
tnode.flow_debug_id());
}
}
tnode.mutate_layout_data().unwrap().flags.insert(::data::LayoutDataFlags::HAS_BEEN_TRAVERSED);
}
if let Some(el) = node.as_element() {
unsafe { el.unset_dirty_descendants(); }
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct | <'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow.base();
base.restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(FlowFlags::CAN_BE_FRAGMENTED)
}
}
pub struct ComputeStackingRelativePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
// Top-down pass that resolves each flow's position relative to its stacking
// context. Positions are parent-relative, hence the preorder direction.
impl<'a> PreorderFlowTraversal for ComputeStackingRelativePositions<'a> {
#[inline]
fn should_process_subtree(&self, flow: &mut Flow) -> bool {
// If nothing in this subtree needs repositioning, prune it entirely.
flow.base().restyle_damage.contains(ServoRestyleDamage::REPOSITION)
}
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_stacking_relative_position(self.layout_context);
// Position is now up to date; clear the damage bit so later passes skip it.
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPOSITION)
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
/// Recursively builds display list items for `flow` and its subtree.
///
/// The builder state tracks the current stacking context and the current
/// clip/scroll node; both are saved on entry and restored on exit so each
/// sibling subtree sees the parent's values, not a sibling's leftovers.
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
// Save parent state, then switch to this flow's contexts.
let parent_stacking_context_id = self.state.current_stacking_context_id;
self.state.current_stacking_context_id = flow.base().stacking_context_id;
let parent_clipping_and_scrolling = self.state.current_clipping_and_scrolling;
self.state.current_clipping_and_scrolling = flow.clipping_and_scrolling();
flow.build_display_list(&mut self.state);
// Display items are rebuilt; repaint damage for this flow is resolved.
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPAINT);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
// Restore parent state for the caller's remaining children.
self.state.current_stacking_context_id = parent_stacking_context_id;
self.state.current_clipping_and_scrolling = parent_clipping_and_scrolling;
}
}
| AssignISizes | identifier_name |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use display_list_builder::DisplayListBuildState;
use flow::{FlowFlags, Flow, GetBaseFlow, ImmutableFlowUtils};
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use servo_config::opts;
use style::context::{SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{NodeInfo, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::ServoRestyleDamage;
use style::traversal::{DomTraversal, recalc_style_at};
use style::traversal::PerLevelTraversalData;
use wrapper::{GetRawData, LayoutNodeLayoutData};
use wrapper::ThreadSafeLayoutNodeHelpers;
pub struct RecalcStyleAndConstructFlows<'a> {
context: LayoutContext<'a>,
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
pub fn layout_context(&self) -> &LayoutContext<'a> {
&self.context
}
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
/// Creates a traversal context, taking ownership of the shared layout context.
pub fn new(context: LayoutContext<'a>) -> Self {
RecalcStyleAndConstructFlows {
context: context,
}
}
/// Consumes this traversal context, returning ownership of the shared layout
/// context to the caller.
pub fn destroy(self) -> LayoutContext<'a> {
self.context
}
}
#[allow(unsafe_code)]
impl<'a, E> DomTraversal<E> for RecalcStyleAndConstructFlows<'a>
where E: TElement,
E::ConcreteNode: LayoutNode,
E::FontMetricsProvider: Send,
{
fn process_preorder<F>(&self, traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>, node: E::ConcreteNode,
note_child: F)
where F: FnMut(E::ConcreteNode)
{
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
unsafe { node.initialize_data() };
if!node.is_text_node() {
let el = node.as_element().unwrap();
let mut data = el.mutate_data().unwrap();
recalc_style_at(self, traversal_data, context, el, &mut data, note_child);
}
}
fn process_postorder(&self, _style_context: &mut StyleContext<E>, node: E::ConcreteNode) {
construct_flows_at(&self.context, node);
}
fn text_node_needs_traversal(node: E::ConcreteNode, parent_data: &ElementData) -> bool {
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
node.get_raw_data().is_none() ||!parent_data.damage.is_empty()
}
fn shared_context(&self) -> &SharedStyleContext {
&self.context.style_context
}
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&self, _flow: &mut Flow) -> bool {
true
}
/// Returns true if this node must be processed in-order. If this returns false,
/// we skip the operation for this node, but continue processing the descendants.
/// This is called *after* parent nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in preorder.
fn traverse(&self, flow: &mut Flow) {
if!self.should_process_subtree(flow) {
return;
}
if self.should_process(flow) {
self.process(flow);
}
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
}
/// Traverse the Absolute flow tree in preorder.
///
/// Traverse all your direct absolute descendants, who will then traverse
/// their direct absolute descendants.
///
/// Return true if the traversal is to continue or false to stop.
fn traverse_absolute_flows(&self, flow: &mut Flow) {
if self.should_process(flow) |
for descendant_link in flow.mut_base().abs_descendants.iter() {
self.traverse_absolute_flows(descendant_link)
}
}
}
/// A bottom-up traversal, with an optional in-order pass.
pub trait PostorderFlowTraversal {
/// The operation to perform on each flow.
fn process(&self, flow: &mut Flow);
/// Returns true if `process` should be invoked for this flow. Returning
/// false skips the operation for this flow only; ancestors are still
/// visited. This is called *after* child nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in postorder: children first, then this flow.
fn traverse(&self, flow: &mut Flow) {
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
if self.should_process(flow) {
self.process(flow);
}
}
}
/// An in-order (sequential only) traversal.
pub trait InorderFlowTraversal {
/// The operation to perform on each flow; `level` is this flow's depth in
/// the tree (the root of the traversal is at the starting level).
fn process(&mut self, flow: &mut Flow, level: u32);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&mut self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in-order: this flow first, then its children with an
/// incremented level.
fn traverse(&mut self, flow: &mut Flow, level: u32) {
if!self.should_process_subtree(flow) {
return;
}
self.process(flow, level);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid, level + 1);
}
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
///
/// Runs in the postorder (bottom-up) phase: a node's flow is (re)built only
/// when incremental layout is disabled, the node itself has restyle damage,
/// or one of its descendants was marked dirty. When possible, an existing
/// flow is repaired in place instead of being rebuilt from scratch.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<N>(context: &LayoutContext, node: N)
where N: LayoutNode,
{
debug!("construct_flows_at: {:?}", node);
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
// Prefer an in-place repair; fall back to full reconstruction.
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:?}: {:x}",
tnode,
tnode.flow_debug_id());
}
}
// Mark the node so later passes know flow construction has happened.
tnode.mutate_layout_data().unwrap().flags.insert(::data::LayoutDataFlags::HAS_BEEN_TRAVERSED);
}
// This subtree's dirtiness has been handled; clear the flag on the element.
if let Some(el) = node.as_element() {
unsafe { el.unset_dirty_descendants(); }
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow.base();
base.restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(FlowFlags::CAN_BE_FRAGMENTED)
}
}
pub struct ComputeStackingRelativePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeStackingRelativePositions<'a> {
#[inline]
fn should_process_subtree(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::REPOSITION)
}
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_stacking_relative_position(self.layout_context);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPOSITION)
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let parent_stacking_context_id = self.state.current_stacking_context_id;
self.state.current_stacking_context_id = flow.base().stacking_context_id;
let parent_clipping_and_scrolling = self.state.current_clipping_and_scrolling;
self.state.current_clipping_and_scrolling = flow.clipping_and_scrolling();
flow.build_display_list(&mut self.state);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPAINT);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
self.state.current_stacking_context_id = parent_stacking_context_id;
self.state.current_clipping_and_scrolling = parent_clipping_and_scrolling;
}
}
| {
self.process(flow);
} | conditional_block |
traversal.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::LayoutContext;
use display_list_builder::DisplayListBuildState;
use flow::{FlowFlags, Flow, GetBaseFlow, ImmutableFlowUtils};
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use servo_config::opts;
use style::context::{SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{NodeInfo, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::ServoRestyleDamage;
use style::traversal::{DomTraversal, recalc_style_at};
use style::traversal::PerLevelTraversalData;
use wrapper::{GetRawData, LayoutNodeLayoutData};
use wrapper::ThreadSafeLayoutNodeHelpers;
pub struct RecalcStyleAndConstructFlows<'a> {
context: LayoutContext<'a>,
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
pub fn layout_context(&self) -> &LayoutContext<'a> {
&self.context
}
}
impl<'a> RecalcStyleAndConstructFlows<'a> {
/// Creates a traversal context, taking ownership of the shared layout context.
pub fn new(context: LayoutContext<'a>) -> Self {
RecalcStyleAndConstructFlows {
context: context,
}
}
/// Consumes this traversal context, returning ownership of the shared layout
/// context to the caller.
pub fn destroy(self) -> LayoutContext<'a> {
self.context
}
}
#[allow(unsafe_code)]
impl<'a, E> DomTraversal<E> for RecalcStyleAndConstructFlows<'a>
where E: TElement,
E::ConcreteNode: LayoutNode,
E::FontMetricsProvider: Send,
{
fn process_preorder<F>(&self, traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>, node: E::ConcreteNode,
note_child: F)
where F: FnMut(E::ConcreteNode)
{
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
unsafe { node.initialize_data() };
if!node.is_text_node() {
let el = node.as_element().unwrap();
let mut data = el.mutate_data().unwrap();
recalc_style_at(self, traversal_data, context, el, &mut data, note_child);
}
}
fn process_postorder(&self, _style_context: &mut StyleContext<E>, node: E::ConcreteNode) {
construct_flows_at(&self.context, node);
}
fn text_node_needs_traversal(node: E::ConcreteNode, parent_data: &ElementData) -> bool {
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
node.get_raw_data().is_none() ||!parent_data.damage.is_empty()
}
fn shared_context(&self) -> &SharedStyleContext {
&self.context.style_context
}
}
/// A top-down traversal.
pub trait PreorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&self, _flow: &mut Flow) -> bool {
true
}
/// Returns true if this node must be processed in-order. If this returns false,
/// we skip the operation for this node, but continue processing the descendants.
/// This is called *after* parent nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in preorder.
fn traverse(&self, flow: &mut Flow) {
if!self.should_process_subtree(flow) {
return;
}
if self.should_process(flow) {
self.process(flow);
}
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
}
/// Traverse the Absolute flow tree in preorder.
///
/// Traverse all your direct absolute descendants, who will then traverse
/// their direct absolute descendants.
///
/// Return true if the traversal is to continue or false to stop.
fn traverse_absolute_flows(&self, flow: &mut Flow) {
if self.should_process(flow) {
self.process(flow);
}
for descendant_link in flow.mut_base().abs_descendants.iter() {
self.traverse_absolute_flows(descendant_link)
}
}
}
/// A bottom-up traversal, with a optional in-order pass.
pub trait PostorderFlowTraversal {
/// The operation to perform. Return true to continue or false to stop.
fn process(&self, flow: &mut Flow);
/// Returns false if this node must be processed in-order. If this returns false, we skip the
/// operation for this node, but continue processing the ancestors. This is called *after*
/// child nodes are visited.
fn should_process(&self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in postorder.
fn traverse(&self, flow: &mut Flow) {
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
if self.should_process(flow) {
self.process(flow);
}
}
}
/// An in-order (sequential only) traversal.
pub trait InorderFlowTraversal {
/// The operation to perform. Returns the level of the tree we're at.
fn process(&mut self, flow: &mut Flow, level: u32);
/// Returns true if this node should be processed and false if neither this node nor its
/// descendants should be processed.
fn should_process_subtree(&mut self, _flow: &mut Flow) -> bool {
true
}
/// Traverses the tree in-order.
fn traverse(&mut self, flow: &mut Flow, level: u32) {
if!self.should_process_subtree(flow) {
return;
}
self.process(flow, level);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid, level + 1);
}
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<N>(context: &LayoutContext, node: N)
where N: LayoutNode,
{
debug!("construct_flows_at: {:?}", node);
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:?}: {:x}",
tnode,
tnode.flow_debug_id());
}
}
tnode.mutate_layout_data().unwrap().flags.insert(::data::LayoutDataFlags::HAS_BEEN_TRAVERSED);
}
if let Some(el) = node.as_element() {
unsafe { el.unset_dirty_descendants(); }
}
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) { | flow.mut_base().restyle_damage.remove(ServoRestyleDamage::BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Clone, Copy)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow.base();
base.restyle_damage.intersects(ServoRestyleDamage::REFLOW_OUT_OF_FLOW | ServoRestyleDamage::REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(FlowFlags::CAN_BE_FRAGMENTED)
}
}
pub struct ComputeStackingRelativePositions<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PreorderFlowTraversal for ComputeStackingRelativePositions<'a> {
#[inline]
fn should_process_subtree(&self, flow: &mut Flow) -> bool {
flow.base().restyle_damage.contains(ServoRestyleDamage::REPOSITION)
}
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_stacking_relative_position(self.layout_context);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPOSITION)
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let parent_stacking_context_id = self.state.current_stacking_context_id;
self.state.current_stacking_context_id = flow.base().stacking_context_id;
let parent_clipping_and_scrolling = self.state.current_clipping_and_scrolling;
self.state.current_clipping_and_scrolling = flow.clipping_and_scrolling();
flow.build_display_list(&mut self.state);
flow.mut_base().restyle_damage.remove(ServoRestyleDamage::REPAINT);
for kid in flow.mut_base().child_iter_mut() {
self.traverse(kid);
}
self.state.current_stacking_context_id = parent_stacking_context_id;
self.state.current_clipping_and_scrolling = parent_clipping_and_scrolling;
}
} | flow.bubble_inline_sizes(); | random_line_split |
protocol.rs | use std::convert::{From, Into};
use std::fmt;

use self::OpCode::*;

/// Operation codes for WebSocket frames, as defined by RFC 6455.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum OpCode {
    /// Continuation frame of a fragmented message.
    Continue,
    /// Text data frame.
    Text,
    /// Binary data frame.
    Binary,
    /// Close control frame.
    Close,
    /// Ping control frame.
    Ping,
    /// Pong control frame.
    Pong,
    /// Marker for an opcode value that the protocol does not define.
    Bad,
}

impl OpCode {
    /// Reports whether this opcode denotes a control frame.
    ///
    /// Data frames (`Continue`, `Text`, `Binary`) are not control frames;
    /// everything else — including the invalid marker `Bad` — is.
    pub fn is_control(&self) -> bool {
        match *self {
            Close | Ping | Pong | Bad => true,
            Continue | Text | Binary => false,
        }
    }
}

impl fmt::Display for OpCode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Upper-case names as used in trace/debug output.
        let name = match *self {
            Continue => "CONTINUE",
            Text => "TEXT",
            Binary => "BINARY",
            Close => "CLOSE",
            Ping => "PING",
            Pong => "PONG",
            Bad => "BAD",
        };
        write!(f, "{}", name)
    }
}

impl Into<u8> for OpCode {
    /// Converts the opcode into its numeric wire value.
    ///
    /// `Bad` has no wire representation: debug builds assert, while release
    /// builds fall back to the close opcode so the connection tears down
    /// quickly should this ever be reached.
    fn into(self) -> u8 {
        match self {
            Continue => 0,
            Text => 1,
            Binary => 2,
            Close => 8,
            Ping => 9,
            Pong => 10,
            Bad => {
                debug_assert!(
                    false,
                    "Attempted to convert invalid opcode to u8. This is a bug."
                );
                8 // if this somehow happens, a close frame will help us tear down quickly
            }
        }
    }
}
impl From<u8> for OpCode {
fn | (byte: u8) -> OpCode {
match byte {
0 => Continue,
1 => Text,
2 => Binary,
8 => Close,
9 => Ping,
10 => Pong,
_ => Bad,
}
}
}
use self::CloseCode::*;

/// Status code indicating why an endpoint is closing the WebSocket connection.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum CloseCode {
    /// Normal closure: the purpose for which the connection was established
    /// has been fulfilled.
    Normal,
    /// The endpoint is "going away", e.g. a server shutting down or a browser
    /// navigating away from the page.
    Away,
    /// Terminating the connection due to a protocol error.
    Protocol,
    /// Terminating because a kind of data was received that this endpoint
    /// cannot accept (e.g. binary data sent to a text-only endpoint).
    Unsupported,
    /// No status code was present in the closing frame; this lets a single
    /// `on_close` handler also cover closes that carried no code.
    Status,
    /// Abnormal closure: used when the connection is simply dropped without
    /// an error. Failures with an error are reported via `on_error` instead.
    Abnormal,
    /// A message contained data inconsistent with its declared type, e.g.
    /// non-UTF-8 [RFC3629] bytes inside a text message.
    Invalid,
    /// A received message violates this endpoint's policy; a generic code for
    /// when nothing more specific fits or details must stay hidden.
    Policy,
    /// A received message is too big for this endpoint to process.
    Size,
    /// Sent by a client when the server failed to negotiate one or more
    /// required extensions during the handshake; the close reason should list
    /// them. Servers fail the handshake instead of using this code.
    Extension,
    /// The server hit an unexpected condition that prevented it from
    /// fulfilling the request.
    Error,
    /// The server is restarting; a reconnecting client should wait a
    /// randomized 5-30 seconds between attempts.
    Restart,
    /// The server is overloaded; the client should connect to a different IP
    /// or retry only after a user action.
    Again,
    #[doc(hidden)]
    Tls,
    #[doc(hidden)]
    Empty,
    #[doc(hidden)]
    Other(u16),
}

impl Into<u16> for CloseCode {
    /// Returns the numeric status code that this variant represents.
    fn into(self) -> u16 {
        match self {
            Normal => 1000,
            Away => 1001,
            Protocol => 1002,
            Unsupported => 1003,
            Status => 1005,
            Abnormal => 1006,
            Invalid => 1007,
            Policy => 1008,
            Size => 1009,
            Extension => 1010,
            Error => 1011,
            Restart => 1012,
            Again => 1013,
            Tls => 1015,
            Empty => 0,
            Other(code) => code,
        }
    }
}

impl From<u16> for CloseCode {
    /// Maps a numeric status code onto the matching variant, wrapping any
    /// unrecognized value in `Other`.
    fn from(value: u16) -> CloseCode {
        match value {
            0 => Empty,
            1000 => Normal,
            1001 => Away,
            1002 => Protocol,
            1003 => Unsupported,
            1005 => Status,
            1006 => Abnormal,
            1007 => Invalid,
            1008 => Policy,
            1009 => Size,
            1010 => Extension,
            1011 => Error,
            1012 => Restart,
            1013 => Again,
            1015 => Tls,
            other => Other(other),
        }
    }
}
// Unit tests for the opcode and close-code wire conversions.
mod test {
#![allow(unused_imports, unused_variables, dead_code)]
use super::*;
// u8 -> OpCode: wire value 2 is a binary data frame.
#[test]
fn opcode_from_u8() {
let byte = 2u8;
assert_eq!(OpCode::from(byte), OpCode::Binary);
}
// OpCode -> u8: a text frame serializes as wire value 1.
#[test]
fn opcode_into_u8() {
let text = OpCode::Text;
let byte: u8 = text.into();
assert_eq!(byte, 1u8);
}
// u16 -> CloseCode: status 1008 is a policy violation.
#[test]
fn closecode_from_u16() {
let byte = 1008u16;
assert_eq!(CloseCode::from(byte), CloseCode::Policy);
}
// CloseCode -> u16: Away serializes as status 1001.
#[test]
fn closecode_into_u16() {
let text = CloseCode::Away;
let byte: u16 = text.into();
assert_eq!(byte, 1001u16);
}
}
| from | identifier_name |
protocol.rs | use std::convert::{From, Into};
use std::fmt;
use self::OpCode::*;
/// Operation codes as part of rfc6455.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum OpCode {
/// Indicates a continuation frame of a fragmented message.
Continue,
/// Indicates a text data frame.
Text,
/// Indicates a binary data frame.
Binary,
/// Indicates a close control frame.
Close,
/// Indicates a ping control frame.
Ping,
/// Indicates a pong control frame.
Pong,
/// Indicates an invalid opcode was received.
Bad,
}
impl OpCode {
/// Test whether the opcode indicates a control frame.
pub fn is_control(&self) -> bool {
match *self {
Text | Binary | Continue => false,
_ => true,
}
}
}
impl fmt::Display for OpCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Continue => write!(f, "CONTINUE"),
Text => write!(f, "TEXT"),
Binary => write!(f, "BINARY"),
Close => write!(f, "CLOSE"),
Ping => write!(f, "PING"),
Pong => write!(f, "PONG"),
Bad => write!(f, "BAD"),
}
}
}
impl Into<u8> for OpCode {
fn into(self) -> u8 {
match self {
Continue => 0,
Text => 1,
Binary => 2,
Close => 8,
Ping => 9,
Pong => 10,
Bad => {
debug_assert!(
false,
"Attempted to convert invalid opcode to u8. This is a bug."
);
8 // if this somehow happens, a close frame will help us tear down quickly
}
}
}
}
impl From<u8> for OpCode {
fn from(byte: u8) -> OpCode {
match byte {
0 => Continue,
1 => Text,
2 => Binary,
8 => Close,
9 => Ping,
10 => Pong,
_ => Bad,
}
}
}
use self::CloseCode::*;
/// Status code used to indicate why an endpoint is closing the WebSocket connection.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum CloseCode {
/// Indicates a normal closure, meaning that the purpose for
/// which the connection was established has been fulfilled.
Normal,
/// Indicates that an endpoint is "going away", such as a server
/// going down or a browser having navigated away from a page.
Away,
/// Indicates that an endpoint is terminating the connection due
/// to a protocol error.
Protocol,
/// Indicates that an endpoint is terminating the connection
/// because it has received a type of data it cannot accept (e.g., an
/// endpoint that understands only text data MAY send this if it
/// receives a binary message).
Unsupported,
/// Indicates that no status code was included in a closing frame. This
/// close code makes it possible to use a single method, `on_close` to
/// handle even cases where no close code was provided.
Status,
/// Indicates an abnormal closure. If the abnormal closure was due to an
/// error, this close code will not be used. Instead, the `on_error` method
/// of the handler will be called with the error. However, if the connection
/// is simply dropped, without an error, this close code will be sent to the
/// handler.
Abnormal,
/// Indicates that an endpoint is terminating the connection
/// because it has received data within a message that was not
/// consistent with the type of the message (e.g., non-UTF-8 [RFC3629]
/// data within a text message).
Invalid,
/// Indicates that an endpoint is terminating the connection
/// because it has received a message that violates its policy. This
/// is a generic status code that can be returned when there is no
/// other more suitable status code (e.g., Unsupported or Size) or if there
/// is a need to hide specific details about the policy.
Policy,
/// Indicates that an endpoint is terminating the connection
/// because it has received a message that is too big for it to
/// process.
Size,
/// Indicates that an endpoint (client) is terminating the
/// connection because it has expected the server to negotiate one or
/// more extension, but the server didn't return them in the response
/// message of the WebSocket handshake. The list of extensions that
/// are needed should be given as the reason for closing.
/// Note that this status code is not used by the server, because it
/// can fail the WebSocket handshake instead.
Extension,
/// Indicates that a server is terminating the connection because
/// it encountered an unexpected condition that prevented it from
/// fulfilling the request.
Error,
/// Indicates that the server is restarting. A client may choose to reconnect,
/// and if it does, it should use a randomized delay of 5-30 seconds between attempts.
Restart,
/// Indicates that the server is overloaded and the client should either connect
/// to a different IP (when multiple targets exist), or reconnect to the same IP
/// when a user has performed an action.
Again,
#[doc(hidden)]
Tls,
#[doc(hidden)]
Empty,
#[doc(hidden)] | }
impl Into<u16> for CloseCode {
fn into(self) -> u16 {
match self {
Normal => 1000,
Away => 1001,
Protocol => 1002,
Unsupported => 1003,
Status => 1005,
Abnormal => 1006,
Invalid => 1007,
Policy => 1008,
Size => 1009,
Extension => 1010,
Error => 1011,
Restart => 1012,
Again => 1013,
Tls => 1015,
Empty => 0,
Other(code) => code,
}
}
}
impl From<u16> for CloseCode {
fn from(code: u16) -> CloseCode {
match code {
1000 => Normal,
1001 => Away,
1002 => Protocol,
1003 => Unsupported,
1005 => Status,
1006 => Abnormal,
1007 => Invalid,
1008 => Policy,
1009 => Size,
1010 => Extension,
1011 => Error,
1012 => Restart,
1013 => Again,
1015 => Tls,
0 => Empty,
_ => Other(code),
}
}
}
/// Unit tests for the opcode and close-code conversions.
/// `#[cfg(test)]` keeps the module (and its glob import) out of
/// non-test builds; previously it was compiled unconditionally.
#[cfg(test)]
mod test {
    #![allow(unused_imports, unused_variables, dead_code)]
    use super::*;
    #[test]
    fn opcode_from_u8() {
        let byte = 2u8;
        assert_eq!(OpCode::from(byte), OpCode::Binary);
    }
    #[test]
    fn opcode_into_u8() {
        let text = OpCode::Text;
        let byte: u8 = text.into();
        assert_eq!(byte, 1u8);
    }
    #[test]
    fn closecode_from_u16() {
        let byte = 1008u16;
        assert_eq!(CloseCode::from(byte), CloseCode::Policy);
    }
    #[test]
    fn closecode_into_u16() {
        let text = CloseCode::Away;
        let byte: u16 = text.into();
        assert_eq!(byte, 1001u16);
    }
}
protocol.rs | use std::convert::{From, Into};
use std::fmt;
use self::OpCode::*;
/// Operation codes as part of rfc6455 (section 5.2).
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum OpCode {
    /// Indicates a continuation frame of a fragmented message.
    Continue,
    /// Indicates a text data frame.
    Text,
    /// Indicates a binary data frame.
    Binary,
    /// Indicates a close control frame.
    Close,
    /// Indicates a ping control frame.
    Ping,
    /// Indicates a pong control frame.
    Pong,
    /// Indicates an invalid opcode was received.
    Bad,
}

impl OpCode {
    /// Test whether the opcode indicates a control frame
    /// (close/ping/pong, or the invalid marker `Bad`).
    pub fn is_control(&self) -> bool {
        match *self {
            OpCode::Close | OpCode::Ping | OpCode::Pong | OpCode::Bad => true,
            OpCode::Text | OpCode::Binary | OpCode::Continue => false,
        }
    }
}

impl fmt::Display for OpCode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Upper-case names matching the frame-type names used in logs.
        let name = match *self {
            OpCode::Continue => "CONTINUE",
            OpCode::Text => "TEXT",
            OpCode::Binary => "BINARY",
            OpCode::Close => "CLOSE",
            OpCode::Ping => "PING",
            OpCode::Pong => "PONG",
            OpCode::Bad => "BAD",
        };
        f.write_str(name)
    }
}

/// Encode an opcode into its wire value.
///
/// Implemented as `From<OpCode> for u8` (the blanket impl keeps providing
/// `Into<u8>` for existing `.into()` callers). The conversion body had been
/// stripped by extraction; reconstructed from the stray copy further down.
impl From<OpCode> for u8 {
    fn from(op: OpCode) -> u8 {
        match op {
            OpCode::Continue => 0,
            OpCode::Text => 1,
            OpCode::Binary => 2,
            OpCode::Close => 8,
            OpCode::Ping => 9,
            OpCode::Pong => 10,
            OpCode::Bad => {
                // Converting `Bad` is a caller bug; assert in debug builds and
                // fall back to the close opcode so the peer tears down quickly.
                debug_assert!(
                    false,
                    "Attempted to convert invalid opcode to u8. This is a bug."
                );
                8
            }
        }
    }
}

impl From<u8> for OpCode {
    /// Decode a wire opcode; anything unrecognized maps to `Bad`.
    fn from(byte: u8) -> OpCode {
        match byte {
            0 => OpCode::Continue,
            1 => OpCode::Text,
            2 => OpCode::Binary,
            8 => OpCode::Close,
            9 => OpCode::Ping,
            10 => OpCode::Pong,
            _ => OpCode::Bad,
        }
    }
}
use self::CloseCode::*;
/// Status code used to indicate why an endpoint is closing the WebSocket
/// connection (rfc6455 section 7.4).
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum CloseCode {
    /// Indicates a normal closure, meaning that the purpose for
    /// which the connection was established has been fulfilled.
    Normal,
    /// Indicates that an endpoint is "going away", such as a server
    /// going down or a browser having navigated away from a page.
    Away,
    /// Indicates that an endpoint is terminating the connection due
    /// to a protocol error.
    Protocol,
    /// Indicates that an endpoint is terminating the connection
    /// because it has received a type of data it cannot accept (e.g., an
    /// endpoint that understands only text data MAY send this if it
    /// receives a binary message).
    Unsupported,
    /// Indicates that no status code was included in a closing frame. This
    /// close code makes it possible to use a single method, `on_close` to
    /// handle even cases where no close code was provided.
    Status,
    /// Indicates an abnormal closure. If the abnormal closure was due to an
    /// error, this close code will not be used. Instead, the `on_error` method
    /// of the handler will be called with the error. However, if the connection
    /// is simply dropped, without an error, this close code will be sent to the
    /// handler.
    Abnormal,
    /// Indicates that an endpoint is terminating the connection
    /// because it has received data within a message that was not
    /// consistent with the type of the message (e.g., non-UTF-8 [RFC3629]
    /// data within a text message).
    Invalid,
    /// Indicates that an endpoint is terminating the connection
    /// because it has received a message that violates its policy. This
    /// is a generic status code that can be returned when there is no
    /// other more suitable status code (e.g., Unsupported or Size) or if there
    /// is a need to hide specific details about the policy.
    Policy,
    /// Indicates that an endpoint is terminating the connection
    /// because it has received a message that is too big for it to
    /// process.
    Size,
    /// Indicates that an endpoint (client) is terminating the
    /// connection because it has expected the server to negotiate one or
    /// more extension, but the server didn't return them in the response
    /// message of the WebSocket handshake. The list of extensions that
    /// are needed should be given as the reason for closing.
    /// Note that this status code is not used by the server, because it
    /// can fail the WebSocket handshake instead.
    Extension,
    /// Indicates that a server is terminating the connection because
    /// it encountered an unexpected condition that prevented it from
    /// fulfilling the request.
    Error,
    /// Indicates that the server is restarting. A client may choose to reconnect,
    /// and if it does, it should use a randomized delay of 5-30 seconds between attempts.
    Restart,
    /// Indicates that the server is overloaded and the client should either connect
    /// to a different IP (when multiple targets exist), or reconnect to the same IP
    /// when a user has performed an action.
    Again,
    #[doc(hidden)]
    Tls,
    #[doc(hidden)]
    Empty,
    #[doc(hidden)]
    Other(u16),
}

/// Map a close code to its numeric wire value.
///
/// Implemented as `From<CloseCode> for u16` rather than a manual `Into`
/// impl (clippy `from_over_into`): the standard blanket impl still
/// provides `Into<u16>`, so existing `.into()` callers keep working.
impl From<CloseCode> for u16 {
    fn from(code: CloseCode) -> u16 {
        match code {
            CloseCode::Normal => 1000,
            CloseCode::Away => 1001,
            CloseCode::Protocol => 1002,
            CloseCode::Unsupported => 1003,
            CloseCode::Status => 1005,
            CloseCode::Abnormal => 1006,
            CloseCode::Invalid => 1007,
            CloseCode::Policy => 1008,
            CloseCode::Size => 1009,
            CloseCode::Extension => 1010,
            CloseCode::Error => 1011,
            CloseCode::Restart => 1012,
            CloseCode::Again => 1013,
            CloseCode::Tls => 1015,
            CloseCode::Empty => 0,
            // Non-standard codes are carried through unchanged.
            CloseCode::Other(code) => code,
        }
    }
}

/// Decode a numeric close code received off the wire; codes without a
/// dedicated variant are preserved verbatim in `Other`.
impl From<u16> for CloseCode {
    fn from(code: u16) -> CloseCode {
        match code {
            1000 => CloseCode::Normal,
            1001 => CloseCode::Away,
            1002 => CloseCode::Protocol,
            1003 => CloseCode::Unsupported,
            1005 => CloseCode::Status,
            1006 => CloseCode::Abnormal,
            1007 => CloseCode::Invalid,
            1008 => CloseCode::Policy,
            1009 => CloseCode::Size,
            1010 => CloseCode::Extension,
            1011 => CloseCode::Error,
            1012 => CloseCode::Restart,
            1013 => CloseCode::Again,
            1015 => CloseCode::Tls,
            0 => CloseCode::Empty,
            _ => CloseCode::Other(code),
        }
    }
}
/// Unit tests for the opcode and close-code conversions.
/// `#[cfg(test)]` keeps the module (and its glob import) out of
/// non-test builds; previously it was compiled unconditionally.
#[cfg(test)]
mod test {
    #![allow(unused_imports, unused_variables, dead_code)]
    use super::*;
    #[test]
    fn opcode_from_u8() {
        let byte = 2u8;
        assert_eq!(OpCode::from(byte), OpCode::Binary);
    }
    #[test]
    fn opcode_into_u8() {
        let text = OpCode::Text;
        let byte: u8 = text.into();
        assert_eq!(byte, 1u8);
    }
    #[test]
    fn closecode_from_u16() {
        let byte = 1008u16;
        assert_eq!(CloseCode::from(byte), CloseCode::Policy);
    }
    #[test]
    fn closecode_into_u16() {
        let text = CloseCode::Away;
        let byte: u16 = text.into();
        assert_eq!(byte, 1001u16);
    }
}
| {
match self {
Continue => 0,
Text => 1,
Binary => 2,
Close => 8,
Ping => 9,
Pong => 10,
Bad => {
debug_assert!(
false,
"Attempted to convert invalid opcode to u8. This is a bug."
);
8 // if this somehow happens, a close frame will help us tear down quickly
}
}
} | identifier_body |
mod.rs | //! Access to open sqlite3 database by filename.
//!
//! The `core` module requires explicit authority to access files and such,
//! following the principle of least authority.
//!
//! This module provides the privileged functions to create such authorities.
//!
//! *TODO: move `mod access` to its own crate so that linking to `sqlite3` doesn't
//! bring in this ambient authority.*
use libc::c_int;
use std::ptr;
use super::SqliteResult;
use core::{Access, DatabaseConnection, str_charstar};
use ffi;
use access::flags::OpenFlags;
// submodule KLUDGE around missing_docs for bitflags!()
#[allow(missing_docs)]
pub mod flags;
/// Open a database by filename.
///
/// *TODO: test for "Note that sqlite3_open() can be used to either
/// open existing database files or to create and open new database
/// files."*
///
///
/// Refer to [Opening A New Database][open] regarding URI filenames.
///
/// [open]: http://www.sqlite.org/c3ref/open.html
pub fn open(filename: &str, flags: Option<OpenFlags>) -> SqliteResult<DatabaseConnection> { | ByFilename {
filename: filename,
flags: flags.unwrap_or_default()
})
}
/// Access to a database by filename; implements `Access`, so it can be
/// handed to `DatabaseConnection::new`.
pub struct ByFilename<'a> {
    /// Filename or sqlite3 style URI.
    pub filename: &'a str,
    /// Flags for additional control over the new database connection.
    pub flags: OpenFlags
}
impl<'a> Access for ByFilename<'a> {
    /// Open (or create) the database via `sqlite3_open_v2`, writing the
    /// handle through `db` and returning sqlite's result code unchanged.
    fn open(self, db: *mut *mut ffi::sqlite3) -> c_int {
        // NOTE(review): if `str_charstar` returns an *owned* CString, taking
        // `.as_ptr()` of that temporary would dangle before the FFI call —
        // confirm it returns a borrow tied to `self.filename`.
        let c_filename = str_charstar(self.filename).as_ptr();
        let flags = self.flags.bits();
        unsafe { ffi::sqlite3_open_v2(c_filename, db, flags, ptr::null()) }
    }
}
#[cfg(test)]
mod tests {
    use std::default::Default;
    use super::ByFilename;
    use core::DatabaseConnection;
    use std::env::temp_dir;

    /// Smoke test: opening a file-backed database in the system temp
    /// directory must succeed with default open flags.
    #[test]
    fn open_file_db() {
        let db_file = temp_dir().join("db1");
        let path = db_file.into_os_string().into_string().unwrap();
        DatabaseConnection::new(
            ByFilename {
                filename: path.as_ref(),
                flags: Default::default(),
            })
            .unwrap();
    }
}
// Local Variables:
// flycheck-rust-crate-root: "lib.rs"
// End: | DatabaseConnection::new( | random_line_split |
mod.rs | //! Access to open sqlite3 database by filename.
//!
//! The `core` module requires explicit authority to access files and such,
//! following the principle of least authority.
//!
//! This module provides the privileged functions to create such authorities.
//!
//! *TODO: move `mod access` to its own crate so that linking to `sqlite3` doesn't
//! bring in this ambient authority.*
use libc::c_int;
use std::ptr;
use super::SqliteResult;
use core::{Access, DatabaseConnection, str_charstar};
use ffi;
use access::flags::OpenFlags;
// submodule KLUDGE around missing_docs for bitflags!()
#[allow(missing_docs)]
pub mod flags;
/// Open a database by filename.
///
/// *TODO: test for "Note that sqlite3_open() can be used to either
/// open existing database files or to create and open new database
/// files."*
///
///
/// Refer to [Opening A New Database][open] regarding URI filenames.
///
/// [open]: http://www.sqlite.org/c3ref/open.html
pub fn | (filename: &str, flags: Option<OpenFlags>) -> SqliteResult<DatabaseConnection> {
DatabaseConnection::new(
ByFilename {
filename: filename,
flags: flags.unwrap_or_default()
})
}
/// Access to a database by filename; implements `Access`, so it can be
/// handed to `DatabaseConnection::new`.
pub struct ByFilename<'a> {
    /// Filename or sqlite3 style URI.
    pub filename: &'a str,
    /// Flags for additional control over the new database connection.
    pub flags: OpenFlags
}
impl<'a> Access for ByFilename<'a> {
    /// Open (or create) the database via `sqlite3_open_v2`, writing the
    /// handle through `db` and returning sqlite's result code unchanged.
    fn open(self, db: *mut *mut ffi::sqlite3) -> c_int {
        // NOTE(review): if `str_charstar` returns an *owned* CString, taking
        // `.as_ptr()` of that temporary would dangle before the FFI call —
        // confirm it returns a borrow tied to `self.filename`.
        let c_filename = str_charstar(self.filename).as_ptr();
        let flags = self.flags.bits();
        unsafe { ffi::sqlite3_open_v2(c_filename, db, flags, ptr::null()) }
    }
}
#[cfg(test)]
mod tests {
    use std::default::Default;
    use super::ByFilename;
    use core::DatabaseConnection;
    use std::env::temp_dir;
    /// Smoke test: opening a file-backed database in the system temp
    /// directory must succeed with default open flags.
    #[test]
    fn open_file_db() {
        let mut temp_directory = temp_dir();
        temp_directory.push("db1");
        // PathBuf -> String so it can be lent to `ByFilename` as &str.
        let path = temp_directory.into_os_string().into_string().unwrap();
        DatabaseConnection::new(
            ByFilename {
                filename: path.as_ref(), flags: Default::default()
            })
            .unwrap();
    }
}
// Local Variables:
// flycheck-rust-crate-root: "lib.rs"
// End:
| open | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(core_intrinsics)]
#![feature(link_args)]
#![feature(plugin)]
#![feature(unicode)]
#![allow(non_camel_case_types)]
#![plugin(plugins)]
#[macro_use] | extern crate log;
extern crate servo;
extern crate compositing;
extern crate euclid;
extern crate gfx_traits;
extern crate gleam;
extern crate glutin_app;
extern crate rustc_unicode;
extern crate script_traits;
extern crate servo_url;
extern crate style_traits;
extern crate net_traits;
extern crate msg;
extern crate util;
extern crate libc;
#[cfg(target_os="macos")]
#[link_args="-Xlinker -undefined -Xlinker dynamic_lookup"]
extern { }
#[cfg(target_os="macos")]
extern crate cocoa;
#[cfg(target_os="macos")]
#[macro_use]
extern crate objc;
#[cfg(target_os="linux")] extern crate x11;
// Must come first.
pub mod macros;
pub mod browser;
pub mod browser_host;
pub mod command_line;
pub mod cookie;
pub mod core;
pub mod drag_data;
pub mod eutil;
pub mod frame;
pub mod interfaces;
pub mod print_settings;
pub mod process_message;
pub mod render_handler;
pub mod request;
pub mod request_context;
pub mod response;
pub mod stream;
pub mod string;
pub mod string_list;
pub mod string_map;
pub mod string_multimap;
pub mod stubs;
pub mod switches;
pub mod task;
pub mod types;
pub mod urlrequest;
pub mod v8;
pub mod values;
pub mod window;
pub mod wrappers;
pub mod xml_reader;
pub mod zip_reader; | random_line_split |
|
neon.rs | use std::process;
use crate::{Config, NeonGenesisNode, BurnchainController,
BitcoinRegtestController, Keychain};
use stacks::chainstate::burn::db::burndb::BurnDB;
use stacks::burnchains::bitcoin::address::BitcoinAddress;
use stacks::burnchains::Address;
use stacks::burnchains::bitcoin::{BitcoinNetworkType,
address::{BitcoinAddressType}};
use super::RunLoopCallbacks;
/// Coordinating a node running in neon mode.
#[cfg(test)]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    /// Shared counter bumped as blocks are processed; exposed so tests
    /// can observe run-loop progress.
    blocks_processed: std::sync::Arc<std::sync::atomic::AtomicU64>,
}
/// Coordinating a node running in neon mode (non-test build: no
/// progress counter).
#[cfg(not(test))]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
}
impl RunLoop {
/// Sets up a runloop and node, given a config.
#[cfg(not(test))]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
    }
}
/// Sets up a runloop and node, given a config.
/// The test build additionally tracks a shared `blocks_processed`
/// counter so tests can observe run-loop progress.
#[cfg(test)]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
        blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)),
    }
}
/// Hand out a clone of the shared processed-block counter (test builds).
#[cfg(test)]
pub fn get_blocks_processed_arc(&self) -> std::sync::Arc<std::sync::atomic::AtomicU64> {
    self.blocks_processed.clone()
}
// Unit-returning stand-in so call sites compile identically outside tests.
#[cfg(not(test))]
fn get_blocks_processed_arc(&self) {
}
/// Increment the processed-block counter (test builds).
#[cfg(test)]
fn bump_blocks_processed(&self) {
    self.blocks_processed.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}
// No-op outside test builds.
#[cfg(not(test))]
fn bump_blocks_processed(&self) {
}
/// Starts the testnet runloop.
///
/// This function will block by looping infinitely.
/// It will start the burnchain (separate thread), set-up a channel in
/// charge of coordinating the new blocks coming from the burnchain and
/// the nodes, taking turns on tenures.
pub fn start(&mut self, _expected_num_rounds: u64) {
    // Initialize and start the burnchain.
    let mut burnchain = BitcoinRegtestController::new(self.config.clone());
    // A configured miner only actually mines if it has spendable UTXOs;
    // otherwise it degrades to a follower.
    let is_miner = if self.config.node.miner {
        let keychain = Keychain::default(self.config.node.seed.clone());
        let btc_addr = BitcoinAddress::from_bytes(
            BitcoinNetworkType::Regtest,
            BitcoinAddressType::PublicKeyHash,
            &Keychain::address_from_burnchain_signer(&keychain.get_burnchain_signer()).to_bytes())
            .unwrap();
        info!("Miner node: checking UTXOs at address: {}", btc_addr);
        let utxos = burnchain.get_utxos(
            &keychain.generate_op_signer().get_public_key(), 1);
        if utxos.is_none() {
            error!("Miner node: UTXOs not found. Switching to Follower node. Restart node when you get some UTXOs.");
            false
        } else {
            info!("Miner node: starting up, UTXOs found.");
            true
        }
    } else {
        info!("Follower node: starting up");
        false
    };
    let mut burnchain_tip = burnchain.start();
    let mut block_height = burnchain_tip.block_snapshot.block_height;
    // setup genesis
    let node = NeonGenesisNode::new(self.config.clone(), |_| {});
    let mut node = if is_miner {
        node.into_initialized_leader_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    } else {
        node.into_initialized_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    };
    // TODO (hack) instantiate the burndb in the burnchain
    let _ = burnchain.burndb_mut();
    // Start the runloop
    info!("Begin run loop");
    self.bump_blocks_processed();
    loop {
        burnchain_tip = burnchain.sync();
        let next_height = burnchain_tip.block_snapshot.block_height;
        if next_height <= block_height {
            warn!("burnchain.sync() did not progress block height");
            continue;
        }
        // first, let's process all blocks in (block_height, next_height]
        for block_to_process in (block_height+1)..(next_height+1) {
            let block = {
                let ic = burnchain.burndb_ref().index_conn();
                BurnDB::get_ancestor_snapshot(
                    &ic, block_to_process, &burnchain_tip.block_snapshot.burn_header_hash)
                    .unwrap()
                    .expect("Failed to find block in fork processed by bitcoin indexer")
            };
            let burn_header_hash = block.burn_header_hash;
            // Have the node process the new block, that can include, or not, a sortition.
            node.process_burnchain_state(burnchain.burndb_mut(),
                                         &burn_header_hash);
            // Now, tell the relayer to check if it won a sortition during this block,
            // and, if so, to process and advertize the block
            //
            // _this will block if the relayer's buffer is full_
            if !node.relayer_sortition_notify() {
                // relayer hung up, exit.
                // (This `process::exit(1)` had been stripped by extraction —
                // without it the loop kept running against a dead relayer.)
                error!("Block relayer and miner hung up, exiting.");
                process::exit(1);
            }
        }
        // now, let's tell the miner to try and mine.
        if !node.relayer_issue_tenure() {
            // relayer hung up, exit.
            error!("Block relayer and miner hung up, exiting.");
            process::exit(1);
        }
        block_height = next_height;
    }
}
} | process::exit(1); | random_line_split |
neon.rs | use std::process;
use crate::{Config, NeonGenesisNode, BurnchainController,
BitcoinRegtestController, Keychain};
use stacks::chainstate::burn::db::burndb::BurnDB;
use stacks::burnchains::bitcoin::address::BitcoinAddress;
use stacks::burnchains::Address;
use stacks::burnchains::bitcoin::{BitcoinNetworkType,
address::{BitcoinAddressType}};
use super::RunLoopCallbacks;
/// Coordinating a node running in neon mode.
#[cfg(test)]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    /// Shared counter bumped as blocks are processed; exposed so tests
    /// can observe run-loop progress.
    blocks_processed: std::sync::Arc<std::sync::atomic::AtomicU64>,
}
/// Coordinating a node running in neon mode (non-test build: no
/// progress counter).
#[cfg(not(test))]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
}
impl RunLoop {
/// Sets up a runloop and node, given a config.
#[cfg(not(test))]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
    }
}
/// Sets up a runloop and node, given a config.
/// The test build additionally tracks a shared `blocks_processed`
/// counter so tests can observe run-loop progress.
#[cfg(test)]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
        blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)),
    }
}
/// Hand out a clone of the shared processed-block counter (test builds).
#[cfg(test)]
pub fn get_blocks_processed_arc(&self) -> std::sync::Arc<std::sync::atomic::AtomicU64> {
    self.blocks_processed.clone()
}
// Unit-returning stand-in so call sites compile identically outside tests.
#[cfg(not(test))]
fn get_blocks_processed_arc(&self) {
}
/// Increment the processed-block counter (test builds).
#[cfg(test)]
fn bump_blocks_processed(&self) {
    self.blocks_processed.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}
// No-op outside test builds.
#[cfg(not(test))]
fn bump_blocks_processed(&self) {
}
/// Starts the testnet runloop.
///
/// This function will block by looping infinitely.
/// It will start the burnchain (separate thread), set-up a channel in
/// charge of coordinating the new blocks coming from the burnchain and
/// the nodes, taking turns on tenures.
pub fn start(&mut self, _expected_num_rounds: u64) {
// Initialize and start the burnchain.
let mut burnchain = BitcoinRegtestController::new(self.config.clone());
let is_miner = if self.config.node.miner {
let keychain = Keychain::default(self.config.node.seed.clone());
let btc_addr = BitcoinAddress::from_bytes(
BitcoinNetworkType::Regtest,
BitcoinAddressType::PublicKeyHash,
&Keychain::address_from_burnchain_signer(&keychain.get_burnchain_signer()).to_bytes())
.unwrap();
info!("Miner node: checking UTXOs at address: {}", btc_addr);
let utxos = burnchain.get_utxos(
&keychain.generate_op_signer().get_public_key(), 1);
if utxos.is_none() {
error!("Miner node: UTXOs not found. Switching to Follower node. Restart node when you get some UTXOs.");
false
} else {
info!("Miner node: starting up, UTXOs found.");
true
}
} else {
info!("Follower node: starting up");
false
};
let mut burnchain_tip = burnchain.start();
let mut block_height = burnchain_tip.block_snapshot.block_height;
// setup genesis
let node = NeonGenesisNode::new(self.config.clone(), |_| {});
let mut node = if is_miner {
node.into_initialized_leader_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
} else {
node.into_initialized_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
};
// TODO (hack) instantiate the burndb in the burnchain
let _ = burnchain.burndb_mut();
// Start the runloop
info!("Begin run loop");
self.bump_blocks_processed();
loop {
burnchain_tip = burnchain.sync();
let next_height = burnchain_tip.block_snapshot.block_height;
if next_height <= block_height |
// first, let's process all blocks in (block_height, next_height]
for block_to_process in (block_height+1)..(next_height+1) {
let block = {
let ic = burnchain.burndb_ref().index_conn();
BurnDB::get_ancestor_snapshot(
&ic, block_to_process, &burnchain_tip.block_snapshot.burn_header_hash)
.unwrap()
.expect("Failed to find block in fork processed by bitcoin indexer")
};
let burn_header_hash = block.burn_header_hash;
// Have the node process the new block, that can include, or not, a sortition.
node.process_burnchain_state(burnchain.burndb_mut(),
&burn_header_hash);
// Now, tell the relayer to check if it won a sortition during this block,
// and, if so, to process and advertize the block
//
// _this will block if the relayer's buffer is full_
if!node.relayer_sortition_notify() {
// relayer hung up, exit.
error!("Block relayer and miner hung up, exiting.");
process::exit(1);
}
}
// now, let's tell the miner to try and mine.
if!node.relayer_issue_tenure() {
// relayer hung up, exit.
error!("Block relayer and miner hung up, exiting.");
process::exit(1);
}
block_height = next_height;
}
}
}
| {
warn!("burnchain.sync() did not progress block height");
continue;
} | conditional_block |
neon.rs | use std::process;
use crate::{Config, NeonGenesisNode, BurnchainController,
BitcoinRegtestController, Keychain};
use stacks::chainstate::burn::db::burndb::BurnDB;
use stacks::burnchains::bitcoin::address::BitcoinAddress;
use stacks::burnchains::Address;
use stacks::burnchains::bitcoin::{BitcoinNetworkType,
address::{BitcoinAddressType}};
use super::RunLoopCallbacks;
/// Coordinating a node running in neon mode.
#[cfg(test)]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    /// Shared counter bumped as blocks are processed; exposed so tests
    /// can observe run-loop progress.
    blocks_processed: std::sync::Arc<std::sync::atomic::AtomicU64>,
}
/// Coordinating a node running in neon mode (non-test build: no
/// progress counter).
#[cfg(not(test))]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
}
impl RunLoop {
/// Sets up a runloop and node, given a config.
#[cfg(not(test))]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
    }
}
/// Sets up a runloop and node, given a config.
/// The test build additionally tracks a shared `blocks_processed`
/// counter so tests can observe run-loop progress.
#[cfg(test)]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
        blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)),
    }
}
/// Hand out a clone of the shared processed-block counter (test builds).
/// (The function name had been stripped by extraction; recovered from its
/// `#[cfg(not(test))]` twin below and the call sites in `start`.)
#[cfg(test)]
pub fn get_blocks_processed_arc(&self) -> std::sync::Arc<std::sync::atomic::AtomicU64> {
    self.blocks_processed.clone()
}
// Unit-returning stand-in so call sites compile identically outside tests.
#[cfg(not(test))]
fn get_blocks_processed_arc(&self) {
}
/// Increment the processed-block counter (test builds).
#[cfg(test)]
fn bump_blocks_processed(&self) {
    self.blocks_processed.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}
// No-op outside test builds.
#[cfg(not(test))]
fn bump_blocks_processed(&self) {
}
/// Starts the testnet runloop.
///
/// This function will block by looping infinitely.
/// It will start the burnchain (separate thread), set-up a channel in
/// charge of coordinating the new blocks coming from the burnchain and
/// the nodes, taking turns on tenures.
pub fn start(&mut self, _expected_num_rounds: u64) {
    // Initialize and start the burnchain.
    let mut burnchain = BitcoinRegtestController::new(self.config.clone());
    // A configured miner only actually mines if it has spendable UTXOs;
    // otherwise it degrades to a follower.
    let is_miner = if self.config.node.miner {
        let keychain = Keychain::default(self.config.node.seed.clone());
        let btc_addr = BitcoinAddress::from_bytes(
            BitcoinNetworkType::Regtest,
            BitcoinAddressType::PublicKeyHash,
            &Keychain::address_from_burnchain_signer(&keychain.get_burnchain_signer()).to_bytes())
            .unwrap();
        info!("Miner node: checking UTXOs at address: {}", btc_addr);
        let utxos = burnchain.get_utxos(
            &keychain.generate_op_signer().get_public_key(), 1);
        if utxos.is_none() {
            error!("Miner node: UTXOs not found. Switching to Follower node. Restart node when you get some UTXOs.");
            false
        } else {
            info!("Miner node: starting up, UTXOs found.");
            true
        }
    } else {
        info!("Follower node: starting up");
        false
    };
    let mut burnchain_tip = burnchain.start();
    let mut block_height = burnchain_tip.block_snapshot.block_height;
    // setup genesis
    let node = NeonGenesisNode::new(self.config.clone(), |_| {});
    let mut node = if is_miner {
        node.into_initialized_leader_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    } else {
        node.into_initialized_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    };
    // TODO (hack) instantiate the burndb in the burnchain
    let _ = burnchain.burndb_mut();
    // Start the runloop
    info!("Begin run loop");
    self.bump_blocks_processed();
    loop {
        burnchain_tip = burnchain.sync();
        let next_height = burnchain_tip.block_snapshot.block_height;
        if next_height <= block_height {
            warn!("burnchain.sync() did not progress block height");
            continue;
        }
        // first, let's process all blocks in (block_height, next_height]
        for block_to_process in (block_height+1)..(next_height+1) {
            let block = {
                let ic = burnchain.burndb_ref().index_conn();
                BurnDB::get_ancestor_snapshot(
                    &ic, block_to_process, &burnchain_tip.block_snapshot.burn_header_hash)
                    .unwrap()
                    .expect("Failed to find block in fork processed by bitcoin indexer")
            };
            let burn_header_hash = block.burn_header_hash;
            // Have the node process the new block, that can include, or not, a sortition.
            node.process_burnchain_state(burnchain.burndb_mut(),
                &burn_header_hash);
            // Now, tell the relayer to check if it won a sortition during this block,
            // and, if so, to process and advertize the block
            //
            // _this will block if the relayer's buffer is full_
            if !node.relayer_sortition_notify() {
                // relayer hung up, exit.
                error!("Block relayer and miner hung up, exiting.");
                process::exit(1);
            }
        }
        // now, let's tell the miner to try and mine.
        if !node.relayer_issue_tenure() {
            // relayer hung up, exit.
            error!("Block relayer and miner hung up, exiting.");
            process::exit(1);
        }
        block_height = next_height;
    }
}
}
| get_blocks_processed_arc | identifier_name |
neon.rs | use std::process;
use crate::{Config, NeonGenesisNode, BurnchainController,
BitcoinRegtestController, Keychain};
use stacks::chainstate::burn::db::burndb::BurnDB;
use stacks::burnchains::bitcoin::address::BitcoinAddress;
use stacks::burnchains::Address;
use stacks::burnchains::bitcoin::{BitcoinNetworkType,
address::{BitcoinAddressType}};
use super::RunLoopCallbacks;
/// Coordinating a node running in neon mode.
#[cfg(test)]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
    /// Shared counter bumped as blocks are processed; exposed so tests
    /// can observe run-loop progress.
    blocks_processed: std::sync::Arc<std::sync::atomic::AtomicU64>,
}
/// Coordinating a node running in neon mode (non-test build: no
/// progress counter).
#[cfg(not(test))]
pub struct RunLoop {
    config: Config,
    pub callbacks: RunLoopCallbacks,
}
impl RunLoop {
/// Sets up a runloop and node, given a config.
#[cfg(not(test))]
pub fn new(config: Config) -> Self |
/// Sets up a runloop and node, given a config.
/// The test build additionally tracks a shared `blocks_processed`
/// counter so tests can observe run-loop progress.
#[cfg(test)]
pub fn new(config: Config) -> Self {
    Self {
        config,
        callbacks: RunLoopCallbacks::new(),
        blocks_processed: std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)),
    }
}
/// Hand out a clone of the shared processed-block counter (test builds).
#[cfg(test)]
pub fn get_blocks_processed_arc(&self) -> std::sync::Arc<std::sync::atomic::AtomicU64> {
    self.blocks_processed.clone()
}
// Unit-returning stand-in so call sites compile identically outside tests.
#[cfg(not(test))]
fn get_blocks_processed_arc(&self) {
}
/// Increment the processed-block counter (test builds).
#[cfg(test)]
fn bump_blocks_processed(&self) {
    self.blocks_processed.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}
// No-op outside test builds.
#[cfg(not(test))]
fn bump_blocks_processed(&self) {
}
/// Starts the testnet runloop.
///
/// This function will block by looping infinitely.
/// It will start the burnchain (separate thread), set-up a channel in
/// charge of coordinating the new blocks coming from the burnchain and
/// the nodes, taking turns on tenures.
pub fn start(&mut self, _expected_num_rounds: u64) {
    // Initialize and start the burnchain.
    let mut burnchain = BitcoinRegtestController::new(self.config.clone());
    // A configured miner only actually mines if it has spendable UTXOs;
    // otherwise it degrades to a follower.
    let is_miner = if self.config.node.miner {
        let keychain = Keychain::default(self.config.node.seed.clone());
        let btc_addr = BitcoinAddress::from_bytes(
            BitcoinNetworkType::Regtest,
            BitcoinAddressType::PublicKeyHash,
            &Keychain::address_from_burnchain_signer(&keychain.get_burnchain_signer()).to_bytes())
            .unwrap();
        info!("Miner node: checking UTXOs at address: {}", btc_addr);
        let utxos = burnchain.get_utxos(
            &keychain.generate_op_signer().get_public_key(), 1);
        if utxos.is_none() {
            error!("Miner node: UTXOs not found. Switching to Follower node. Restart node when you get some UTXOs.");
            false
        } else {
            info!("Miner node: starting up, UTXOs found.");
            true
        }
    } else {
        info!("Follower node: starting up");
        false
    };
    let mut burnchain_tip = burnchain.start();
    let mut block_height = burnchain_tip.block_snapshot.block_height;
    // setup genesis
    let node = NeonGenesisNode::new(self.config.clone(), |_| {});
    let mut node = if is_miner {
        node.into_initialized_leader_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    } else {
        node.into_initialized_node(burnchain_tip.clone(), self.get_blocks_processed_arc())
    };
    // TODO (hack) instantiate the burndb in the burnchain
    let _ = burnchain.burndb_mut();
    // Start the runloop
    info!("Begin run loop");
    self.bump_blocks_processed();
    loop {
        burnchain_tip = burnchain.sync();
        let next_height = burnchain_tip.block_snapshot.block_height;
        if next_height <= block_height {
            warn!("burnchain.sync() did not progress block height");
            continue;
        }
        // first, let's process all blocks in (block_height, next_height]
        for block_to_process in (block_height+1)..(next_height+1) {
            let block = {
                let ic = burnchain.burndb_ref().index_conn();
                BurnDB::get_ancestor_snapshot(
                    &ic, block_to_process, &burnchain_tip.block_snapshot.burn_header_hash)
                    .unwrap()
                    .expect("Failed to find block in fork processed by bitcoin indexer")
            };
            let burn_header_hash = block.burn_header_hash;
            // Have the node process the new block, that can include, or not, a sortition.
            node.process_burnchain_state(burnchain.burndb_mut(),
                &burn_header_hash);
            // Now, tell the relayer to check if it won a sortition during this block,
            // and, if so, to process and advertize the block
            //
            // _this will block if the relayer's buffer is full_
            if !node.relayer_sortition_notify() {
                // relayer hung up, exit.
                error!("Block relayer and miner hung up, exiting.");
                process::exit(1);
            }
        }
        // now, let's tell the miner to try and mine.
        if !node.relayer_issue_tenure() {
            // relayer hung up, exit.
            error!("Block relayer and miner hung up, exiting.");
            process::exit(1);
        }
        block_height = next_height;
    }
}
}
| {
Self {
config,
callbacks: RunLoopCallbacks::new(),
}
} | identifier_body |
main.rs | extern crate time;
use std::fs::{File, create_dir, OpenOptions, remove_file};
use std::io::{Seek, SeekFrom, BufReader, BufRead, Lines, Write};
use std::io;
use std::path::{Path, PathBuf};
use std::env::{args, home_dir};
use std::fmt;
use std::process::exit;
use time::{Duration, now_utc, Tm, empty_tm, strptime};
/// Entry point: dispatch the first CLI argument to the matching
/// punch-clock command, printing any failure and exiting non-zero.
fn main() {
    let result = match args().nth(1) {
        None => Err(PunchClockError::NoCommandGiven),
        Some(command) => {
            // Propagate timesheet-setup failures through the normal error
            // path instead of panicking (previously `.unwrap()`).
            TimeClock::new().and_then(|mut time_clock| {
                match &command[..] {
                    "in" => time_clock.punch_in(),
                    "out" => time_clock.punch_out(),
                    "status" => time_clock.status(),
                    "report" => time_clock.report_daily_hours(),
                    _ => Err(PunchClockError::UnknownCommand),
                }
            })
        }
    };
    if let Err(e) = result {
        println!("Error: {}", e);
        exit(1);
    }
}
/// Everything that can go wrong while running a punch-clock command.
#[derive(Debug)]
enum PunchClockError {
    /// No subcommand was supplied on the command line.
    NoCommandGiven,
    /// The supplied subcommand is not one of in/out/status/report.
    UnknownCommand,
    /// `punch in` while already punched in.
    AlreadyPunchedIn,
    /// `punch out` while not punched in.
    AlreadyPunchedOut,
    /// The timesheet file contained lines that could not be parsed.
    CorruptedTimeSheet,
    /// Wrapped filesystem/IO failure (see the `From<io::Error>` impl).
    IoError(io::Error),
}
impl fmt::Display for PunchClockError {
    /// Human-readable message shown to the user by `main`.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        use PunchClockError::*;
        match *self {
            NoCommandGiven => fmt.write_str("No command given"),
            UnknownCommand => fmt.write_str("Unknown command"),
            AlreadyPunchedIn => fmt.write_str("You are already punched in"),
            AlreadyPunchedOut => fmt.write_str("You're not currently punched in"),
            CorruptedTimeSheet => fmt.write_str("Bad data in timesheet"),
            // Surface the underlying cause instead of a bare "IO error".
            IoError(ref e) => write!(fmt, "IO error: {}", e),
        }
    }
}
/// Lets `try!`/`?` lift raw I/O failures into `PunchClockError`.
impl From<io::Error> for PunchClockError {
    fn from(err: io::Error) -> PunchClockError {
        PunchClockError::IoError(err)
    }
}
type PunchClockResult<T> = Result<T, PunchClockError>;
struct TimeClock {
now: Tm,
timesheet: File,
timesheet_path: PathBuf,
currently_working: bool,
state_path: PathBuf
}
impl TimeClock {
fn new() -> PunchClockResult<TimeClock> {
let now = now_utc();
let home = home_dir().unwrap();
let base_dir = home.join(Path::new(".punch"));
let timesheet_path = base_dir.join("timesheet");
let working_state_path = base_dir.join("state");
if!path_exists(&base_dir) {
try!(create_dir(&base_dir));
}
let timesheet = try!(OpenOptions::new().write(true).append(true)
.create(true).open(×heet_path));
Ok(TimeClock {
timesheet: timesheet,
timesheet_path: timesheet_path,
currently_working: path_exists(&working_state_path),
state_path: working_state_path,
now: now
})
}
// commands
fn punch_in(&mut self) -> PunchClockResult<()> {
if self.currently_working {
return Err(PunchClockError::AlreadyPunchedIn);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
writeln!(&mut self.timesheet, "in: {}", self.now.rfc822()).unwrap();
self.set_current_working_state(true);
Ok(())
}
fn punch_out(&mut self) -> PunchClockResult<()> {
if!self.currently_working {
return Err(PunchClockError::AlreadyPunchedOut);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
try!(writeln!(&mut self.timesheet, "out: {}", self.now.rfc822()));
self.set_current_working_state(false);
Ok(())
}
fn status(&self) -> PunchClockResult<()> {
if self.currently_working {
println!("You're punched in");
} else {
println!("You're punched out");
}
Ok(())
}
fn report_daily_hours(&mut self) -> PunchClockResult<()> | print_time_worked(&time_worked_today, ¤t_day);
}
Ok(())
}
// aux. methods
fn set_current_working_state(&mut self, currently_working: bool) {
self.currently_working = currently_working;
if currently_working {
File::create(&self.state_path).unwrap();
} else {
remove_file(&self.state_path).unwrap();
}
}
}
struct IntervalIter {
lines: Lines<BufReader<File>>
}
impl IntervalIter {
fn from_lines(lines: Lines<BufReader<File>>) -> IntervalIter {
IntervalIter {lines: lines}
}
}
impl Iterator for IntervalIter {
type Item = PunchClockResult<(Tm, Tm)>;
fn next(&mut self) -> Option<PunchClockResult<(Tm, Tm)>> {
// helper function to make error handling a bit nicer
fn inner_unwrap<T>(x: Option<io::Result<T>>)
-> PunchClockResult<Option<T>> {
match x {
None => Ok(None),
Some(Ok(inner)) => Ok(Some(inner)),
Some(Err(e)) => Err(PunchClockError::IoError(e))
}
}
let line_1 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
let line_2 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
match (line_1, line_2) {
(None, None) => None,
(Some(start_line), o_end_line) => {
if!start_line.starts_with("in: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
let start = parse_time(&start_line[4..]);
let end = match o_end_line {
None => now_utc(),
Some(end_line) => {
if!end_line.starts_with("out: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
parse_time(&end_line[5..])
},
};
Some(Ok((start, end)))
},
_ => unreachable!() // (None, Some(l)) should not happen
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
}
fn parse_time(s: &str) -> Tm {
strptime(&s[..s.len() - 1], "%a, %d %b %Y %T %Z").unwrap()
}
fn same_day(t1: &Tm, t2: &Tm) -> bool {
t1.tm_year == t2.tm_year &&
t1.tm_mon == t2.tm_mon &&
t1.tm_mday == t2.tm_mday
}
fn print_time_worked(t: &Duration, day: &Tm) {
println!("{}: {:>2}:{:02}",
day.strftime("%a, %d %b %Y").unwrap(),
t.num_hours(),
t.num_minutes() % 60
);
}
fn path_exists<P: AsRef<Path>>(path: P) -> bool {
::std::fs::metadata(path).is_ok()
}
| {
try!(self.timesheet.seek(SeekFrom::Start(0)));
let buf = BufReader::new(try!(File::open(&self.timesheet_path)));
let mut current_day = empty_tm();
let mut time_worked_today = Duration::zero();
for interval in IntervalIter::from_lines(buf.lines()) {
let (start, end) = try!(interval);
if !same_day(&start, ¤t_day) {
if !time_worked_today.is_zero() {
print_time_worked(&time_worked_today, ¤t_day);
}
current_day = start;
time_worked_today = Duration::zero();
}
time_worked_today =
time_worked_today + (end.to_timespec() - start.to_timespec());
}
if !time_worked_today.is_zero() { | identifier_body |
main.rs | extern crate time;
use std::fs::{File, create_dir, OpenOptions, remove_file};
use std::io::{Seek, SeekFrom, BufReader, BufRead, Lines, Write};
use std::io;
use std::path::{Path, PathBuf};
use std::env::{args, home_dir};
use std::fmt;
use std::process::exit;
use time::{Duration, now_utc, Tm, empty_tm, strptime};
fn main() {
let result = match args().nth(1) {
None => Err(PunchClockError::NoCommandGiven),
Some(command) => {
let mut time_clock = TimeClock::new().unwrap();
match &command[..] {
"in" => time_clock.punch_in(),
"out" => time_clock.punch_out(),
"status" => time_clock.status(),
"report" => time_clock.report_daily_hours(),
_ => Err(PunchClockError::UnknownCommand)
}
}
};
if let Err(e) = result {
println!("Error: {}", e);
exit(1);
}
}
#[derive(Debug)]
enum PunchClockError {
NoCommandGiven,
UnknownCommand,
AlreadyPunchedIn,
AlreadyPunchedOut,
CorruptedTimeSheet,
IoError(io::Error),
}
impl fmt::Display for PunchClockError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use PunchClockError::*;
fmt.write_str(
match *self {
NoCommandGiven => "No command given",
UnknownCommand => "Unknown command",
AlreadyPunchedIn => "You are already punched in",
AlreadyPunchedOut => "You're not currently punched in",
CorruptedTimeSheet => "Bad data in timesheet",
IoError(_) => "IO error"
}
)
}
}
impl From<io::Error> for PunchClockError {
fn from(err: io::Error) -> PunchClockError {
PunchClockError::IoError(err)
}
}
type PunchClockResult<T> = Result<T, PunchClockError>;
struct TimeClock {
now: Tm,
timesheet: File,
timesheet_path: PathBuf,
currently_working: bool,
state_path: PathBuf
}
impl TimeClock {
fn new() -> PunchClockResult<TimeClock> {
let now = now_utc();
let home = home_dir().unwrap();
let base_dir = home.join(Path::new(".punch"));
let timesheet_path = base_dir.join("timesheet");
let working_state_path = base_dir.join("state");
if!path_exists(&base_dir) {
try!(create_dir(&base_dir));
}
let timesheet = try!(OpenOptions::new().write(true).append(true) | .create(true).open(×heet_path));
Ok(TimeClock {
timesheet: timesheet,
timesheet_path: timesheet_path,
currently_working: path_exists(&working_state_path),
state_path: working_state_path,
now: now
})
}
// commands
fn punch_in(&mut self) -> PunchClockResult<()> {
if self.currently_working {
return Err(PunchClockError::AlreadyPunchedIn);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
writeln!(&mut self.timesheet, "in: {}", self.now.rfc822()).unwrap();
self.set_current_working_state(true);
Ok(())
}
fn punch_out(&mut self) -> PunchClockResult<()> {
if!self.currently_working {
return Err(PunchClockError::AlreadyPunchedOut);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
try!(writeln!(&mut self.timesheet, "out: {}", self.now.rfc822()));
self.set_current_working_state(false);
Ok(())
}
fn status(&self) -> PunchClockResult<()> {
if self.currently_working {
println!("You're punched in");
} else {
println!("You're punched out");
}
Ok(())
}
fn report_daily_hours(&mut self) -> PunchClockResult<()> {
try!(self.timesheet.seek(SeekFrom::Start(0)));
let buf = BufReader::new(try!(File::open(&self.timesheet_path)));
let mut current_day = empty_tm();
let mut time_worked_today = Duration::zero();
for interval in IntervalIter::from_lines(buf.lines()) {
let (start, end) = try!(interval);
if!same_day(&start, ¤t_day) {
if!time_worked_today.is_zero() {
print_time_worked(&time_worked_today, ¤t_day);
}
current_day = start;
time_worked_today = Duration::zero();
}
time_worked_today =
time_worked_today + (end.to_timespec() - start.to_timespec());
}
if!time_worked_today.is_zero() {
print_time_worked(&time_worked_today, ¤t_day);
}
Ok(())
}
// aux. methods
fn set_current_working_state(&mut self, currently_working: bool) {
self.currently_working = currently_working;
if currently_working {
File::create(&self.state_path).unwrap();
} else {
remove_file(&self.state_path).unwrap();
}
}
}
struct IntervalIter {
lines: Lines<BufReader<File>>
}
impl IntervalIter {
fn from_lines(lines: Lines<BufReader<File>>) -> IntervalIter {
IntervalIter {lines: lines}
}
}
impl Iterator for IntervalIter {
type Item = PunchClockResult<(Tm, Tm)>;
fn next(&mut self) -> Option<PunchClockResult<(Tm, Tm)>> {
// helper function to make error handling a bit nicer
fn inner_unwrap<T>(x: Option<io::Result<T>>)
-> PunchClockResult<Option<T>> {
match x {
None => Ok(None),
Some(Ok(inner)) => Ok(Some(inner)),
Some(Err(e)) => Err(PunchClockError::IoError(e))
}
}
let line_1 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
let line_2 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
match (line_1, line_2) {
(None, None) => None,
(Some(start_line), o_end_line) => {
if!start_line.starts_with("in: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
let start = parse_time(&start_line[4..]);
let end = match o_end_line {
None => now_utc(),
Some(end_line) => {
if!end_line.starts_with("out: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
parse_time(&end_line[5..])
},
};
Some(Ok((start, end)))
},
_ => unreachable!() // (None, Some(l)) should not happen
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
}
fn parse_time(s: &str) -> Tm {
strptime(&s[..s.len() - 1], "%a, %d %b %Y %T %Z").unwrap()
}
fn same_day(t1: &Tm, t2: &Tm) -> bool {
t1.tm_year == t2.tm_year &&
t1.tm_mon == t2.tm_mon &&
t1.tm_mday == t2.tm_mday
}
fn print_time_worked(t: &Duration, day: &Tm) {
println!("{}: {:>2}:{:02}",
day.strftime("%a, %d %b %Y").unwrap(),
t.num_hours(),
t.num_minutes() % 60
);
}
fn path_exists<P: AsRef<Path>>(path: P) -> bool {
::std::fs::metadata(path).is_ok()
} | random_line_split |
|
main.rs | extern crate time;
use std::fs::{File, create_dir, OpenOptions, remove_file};
use std::io::{Seek, SeekFrom, BufReader, BufRead, Lines, Write};
use std::io;
use std::path::{Path, PathBuf};
use std::env::{args, home_dir};
use std::fmt;
use std::process::exit;
use time::{Duration, now_utc, Tm, empty_tm, strptime};
fn main() {
let result = match args().nth(1) {
None => Err(PunchClockError::NoCommandGiven),
Some(command) => {
let mut time_clock = TimeClock::new().unwrap();
match &command[..] {
"in" => time_clock.punch_in(),
"out" => time_clock.punch_out(),
"status" => time_clock.status(),
"report" => time_clock.report_daily_hours(),
_ => Err(PunchClockError::UnknownCommand)
}
}
};
if let Err(e) = result {
println!("Error: {}", e);
exit(1);
}
}
#[derive(Debug)]
enum PunchClockError {
NoCommandGiven,
UnknownCommand,
AlreadyPunchedIn,
AlreadyPunchedOut,
CorruptedTimeSheet,
IoError(io::Error),
}
impl fmt::Display for PunchClockError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use PunchClockError::*;
fmt.write_str(
match *self {
NoCommandGiven => "No command given",
UnknownCommand => "Unknown command",
AlreadyPunchedIn => "You are already punched in",
AlreadyPunchedOut => "You're not currently punched in",
CorruptedTimeSheet => "Bad data in timesheet",
IoError(_) => "IO error"
}
)
}
}
impl From<io::Error> for PunchClockError {
fn from(err: io::Error) -> PunchClockError {
PunchClockError::IoError(err)
}
}
type PunchClockResult<T> = Result<T, PunchClockError>;
struct TimeClock {
now: Tm,
timesheet: File,
timesheet_path: PathBuf,
currently_working: bool,
state_path: PathBuf
}
impl TimeClock {
fn new() -> PunchClockResult<TimeClock> {
let now = now_utc();
let home = home_dir().unwrap();
let base_dir = home.join(Path::new(".punch"));
let timesheet_path = base_dir.join("timesheet");
let working_state_path = base_dir.join("state");
if!path_exists(&base_dir) {
try!(create_dir(&base_dir));
}
let timesheet = try!(OpenOptions::new().write(true).append(true)
.create(true).open(×heet_path));
Ok(TimeClock {
timesheet: timesheet,
timesheet_path: timesheet_path,
currently_working: path_exists(&working_state_path),
state_path: working_state_path,
now: now
})
}
// commands
fn punch_in(&mut self) -> PunchClockResult<()> {
if self.currently_working {
return Err(PunchClockError::AlreadyPunchedIn);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
writeln!(&mut self.timesheet, "in: {}", self.now.rfc822()).unwrap();
self.set_current_working_state(true);
Ok(())
}
fn punch_out(&mut self) -> PunchClockResult<()> {
if!self.currently_working {
return Err(PunchClockError::AlreadyPunchedOut);
}
try!(self.timesheet.seek(SeekFrom::End(0)));
try!(writeln!(&mut self.timesheet, "out: {}", self.now.rfc822()));
self.set_current_working_state(false);
Ok(())
}
fn status(&self) -> PunchClockResult<()> {
if self.currently_working {
println!("You're punched in");
} else {
println!("You're punched out");
}
Ok(())
}
fn report_daily_hours(&mut self) -> PunchClockResult<()> {
try!(self.timesheet.seek(SeekFrom::Start(0)));
let buf = BufReader::new(try!(File::open(&self.timesheet_path)));
let mut current_day = empty_tm();
let mut time_worked_today = Duration::zero();
for interval in IntervalIter::from_lines(buf.lines()) {
let (start, end) = try!(interval);
if!same_day(&start, ¤t_day) {
if!time_worked_today.is_zero() {
print_time_worked(&time_worked_today, ¤t_day);
}
current_day = start;
time_worked_today = Duration::zero();
}
time_worked_today =
time_worked_today + (end.to_timespec() - start.to_timespec());
}
if!time_worked_today.is_zero() {
print_time_worked(&time_worked_today, ¤t_day);
}
Ok(())
}
// aux. methods
fn | (&mut self, currently_working: bool) {
self.currently_working = currently_working;
if currently_working {
File::create(&self.state_path).unwrap();
} else {
remove_file(&self.state_path).unwrap();
}
}
}
struct IntervalIter {
lines: Lines<BufReader<File>>
}
impl IntervalIter {
fn from_lines(lines: Lines<BufReader<File>>) -> IntervalIter {
IntervalIter {lines: lines}
}
}
impl Iterator for IntervalIter {
type Item = PunchClockResult<(Tm, Tm)>;
fn next(&mut self) -> Option<PunchClockResult<(Tm, Tm)>> {
// helper function to make error handling a bit nicer
fn inner_unwrap<T>(x: Option<io::Result<T>>)
-> PunchClockResult<Option<T>> {
match x {
None => Ok(None),
Some(Ok(inner)) => Ok(Some(inner)),
Some(Err(e)) => Err(PunchClockError::IoError(e))
}
}
let line_1 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
let line_2 = match inner_unwrap(self.lines.next()) {
Ok(l) => l,
Err(e) => return Some(Err(e))
};
match (line_1, line_2) {
(None, None) => None,
(Some(start_line), o_end_line) => {
if!start_line.starts_with("in: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
let start = parse_time(&start_line[4..]);
let end = match o_end_line {
None => now_utc(),
Some(end_line) => {
if!end_line.starts_with("out: ") {
return Some(Err(PunchClockError::CorruptedTimeSheet));
}
parse_time(&end_line[5..])
},
};
Some(Ok((start, end)))
},
_ => unreachable!() // (None, Some(l)) should not happen
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
}
fn parse_time(s: &str) -> Tm {
strptime(&s[..s.len() - 1], "%a, %d %b %Y %T %Z").unwrap()
}
fn same_day(t1: &Tm, t2: &Tm) -> bool {
t1.tm_year == t2.tm_year &&
t1.tm_mon == t2.tm_mon &&
t1.tm_mday == t2.tm_mday
}
fn print_time_worked(t: &Duration, day: &Tm) {
println!("{}: {:>2}:{:02}",
day.strftime("%a, %d %b %Y").unwrap(),
t.num_hours(),
t.num_minutes() % 60
);
}
fn path_exists<P: AsRef<Path>>(path: P) -> bool {
::std::fs::metadata(path).is_ok()
}
| set_current_working_state | identifier_name |
lib.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The Rust parser and macro expander.
# Note
This API is completely unstable and subject to change.
*/
#![crate_id = "syntax#0.11.0"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/0.11.0/")]
#![feature(macro_rules, globs, managed_boxes, default_type_params, phase)]
#![feature(quote, unsafe_destructor)]
#![allow(deprecated)]
extern crate serialize;
extern crate term;
#[phase(plugin, link)] extern crate log;
extern crate fmt_macros;
extern crate debug;
pub mod util {
pub mod interner;
#[cfg(test)]
pub mod parser_testing;
pub mod small_vector;
}
pub mod syntax {
pub use ext;
pub use parse;
pub use ast;
}
pub mod owned_slice;
pub mod attr;
pub mod diagnostic;
pub mod codemap;
pub mod abi;
pub mod ast; |
pub mod parse;
pub mod crateid;
pub mod print {
pub mod pp;
pub mod pprust;
}
pub mod ext {
pub mod asm;
pub mod base;
pub mod expand;
pub mod quote;
pub mod deriving;
pub mod build;
pub mod tt {
pub mod transcribe;
pub mod macro_parser;
pub mod macro_rules;
}
pub mod mtwt;
pub mod cfg;
pub mod fmt;
pub mod format;
pub mod env;
pub mod bytes;
pub mod concat;
pub mod concat_idents;
pub mod log_syntax;
pub mod source_util;
pub mod trace_macros;
} | pub mod ast_util;
pub mod ast_map;
pub mod visit;
pub mod fold;
| random_line_split |
modify.rs | use std::collections::HashSet;
use std::convert::AsRef;
use std::hash::Hash;
use std::io;
use lber::structures::{Tag, Enumerated, Sequence, Set, OctetString};
use lber::common::TagClass;
use futures::{future, Future};
use tokio_service::Service;
use ldap::{Ldap, LdapOp, next_req_controls};
use result::LdapResult;
/// Possible sub-operations for the Modify operation.
#[derive(Clone, Debug, PartialEq)]
pub enum | <S: AsRef<[u8]> + Eq + Hash> {
/// Add an attribute, with at least one value.
Add(S, HashSet<S>),
/// Delete the entire attribute, or the given values of an attribute.
Delete(S, HashSet<S>),
/// Replace an existing attribute, setting its values to those in the set, or delete it if no values are given.
Replace(S, HashSet<S>),
}
impl Ldap {
/// See [`LdapConn::modify()`](struct.LdapConn.html#method.modify).
pub fn modify<S: AsRef<[u8]> + Eq + Hash>(&self, dn: &str, mods: Vec<Mod<S>>) ->
Box<Future<Item=LdapResult, Error=io::Error>> {
let mut any_add_empty = false;
let req = Tag::Sequence(Sequence {
id: 6,
class: TagClass::Application,
inner: vec![
Tag::OctetString(OctetString {
inner: Vec::from(dn.as_bytes()),
.. Default::default()
}),
Tag::Sequence(Sequence {
inner: mods.into_iter().map(|m| {
let mut is_add = false;
let (num, attr, set) = match m {
Mod::Add(attr, set) => { is_add = true; (0, attr, set) },
Mod::Delete(attr, set) => (1, attr, set),
Mod::Replace(attr, set) => (2, attr, set),
};
if set.is_empty() && is_add {
any_add_empty = true;
}
let op = Tag::Enumerated(Enumerated {
inner: num,
.. Default::default()
});
let part_attr = Tag::Sequence(Sequence {
inner: vec![
Tag::OctetString(OctetString {
inner: Vec::from(attr.as_ref()),
.. Default::default()
}),
Tag::Set(Set {
inner: set.into_iter().map(|val| {
Tag::OctetString(OctetString {
inner: Vec::from(val.as_ref()),
.. Default::default()
})
}).collect(),
.. Default::default()
})
],
.. Default::default()
});
Tag::Sequence(Sequence {
inner: vec![op, part_attr],
.. Default::default()
})
}).collect(),
.. Default::default()
})
]
});
if any_add_empty {
return Box::new(future::err(io::Error::new(io::ErrorKind::Other, "empty value set for Add")));
}
let fut = self.call(LdapOp::Single(req, next_req_controls(self)))
.and_then(|response| {
let (mut result, controls) = (LdapResult::from(response.0), response.1);
result.ctrls = controls;
Ok(result)
});
Box::new(fut)
}
}
| Mod | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.