file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
lib.rs | //! The purpose of this library is to provide an OpenGL context on as many
//! platforms as possible.
//!
//! # Building a window
//!
//! There are two ways to create a window:
//!
//! - Calling `Window::new()`.
//! - Calling `let builder = WindowBuilder::new()` then `builder.build()`.
//!
//! The first way is the simpliest way and will give you default values.
//!
//! The second way allows you to customize the way your window and GL context
//! will look and behave.
//!
//! # Features
//!
//! This crate has two Cargo features: `window` and `headless`.
//!
//! - `window` allows you to create regular windows and enables the `WindowBuilder` object.
//! - `headless` allows you to do headless rendering, and enables
//! the `HeadlessRendererBuilder` object.
//!
//! By default only `window` is enabled.
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate shared_library;
extern crate libc;
extern crate winit;
#[cfg(target_os = "windows")]
extern crate winapi;
#[cfg(target_os = "windows")]
extern crate kernel32;
#[cfg(target_os = "windows")]
extern crate shell32;
#[cfg(target_os = "windows")]
extern crate gdi32;
#[cfg(target_os = "windows")]
extern crate user32;
#[cfg(target_os = "windows")]
extern crate dwmapi;
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[macro_use]
extern crate objc;
#[cfg(target_os = "macos")]
extern crate cgl;
#[cfg(target_os = "macos")]
extern crate cocoa;
#[cfg(target_os = "macos")]
extern crate core_foundation;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
extern crate x11_dl;
#[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
#[macro_use(wayland_env)]
extern crate wayland_client;
pub use events::*;
pub use headless::{HeadlessRendererBuilder, HeadlessContext};
pub use window::{AvailableMonitorsIter, MonitorId, WindowId, get_available_monitors, get_primary_monitor};
pub use winit::NativeMonitorId;
use std::io;
mod api;
mod platform;
mod events;
mod headless;
mod window;
pub mod os;
/// Represents an OpenGL context and the Window or environment around it.
///
/// # Example
///
/// ```ignore
/// let window = Window::new(&events_loop).unwrap();
///
/// unsafe { window.make_current() };
///
/// loop {
/// events_loop.poll_events(|event| {
/// match(event) {
/// // process events here
/// _ => ()
/// }
/// });
///
/// // draw everything here
///
/// window.swap_buffers();
/// std::thread::sleep(std::time::Duration::from_millis(17));
/// }
/// ```
pub struct Window {
window: platform::Window,
}
/// Object that allows you to build windows.
#[derive(Clone)]
pub struct WindowBuilder<'a> {
winit_builder: winit::WindowBuilder,
/// The attributes to use to create the context.
pub opengl: GlAttributes<&'a platform::Window>,
// Should be made public once it's stabilized.
pf_reqs: PixelFormatRequirements,
}
/// Provides a way to retreive events from the windows that are registered to it.
// TODO: document usage in multiple threads
pub struct EventsLoop {
events_loop: platform::EventsLoop,
}
impl EventsLoop {
/// Builds a new events loop.
pub fn new() -> EventsLoop {
EventsLoop {
events_loop: platform::EventsLoop::new(),
}
}
/// Fetches all the events that are pending, calls the callback function for each of them,
/// and returns.
#[inline]
pub fn poll_events<F>(&self, callback: F)
where F: FnMut(Event)
{
self.events_loop.poll_events(callback)
}
/// Runs forever until `interrupt()` is called. Whenever an event happens, calls the callback.
#[inline]
pub fn run_forever<F>(&self, callback: F)
where F: FnMut(Event)
{
self.events_loop.run_forever(callback)
}
/// If we called `run_forever()`, stops the process of waiting for events.
#[inline]
pub fn interrupt(&self) {
self.events_loop.interrupt()
}
}
/// Trait that describes objects that have access to an OpenGL context.
pub trait GlContext {
/// Sets the context as the current context.
unsafe fn make_current(&self) -> Result<(), ContextError>;
/// Returns true if this context is the current one in this thread.
fn is_current(&self) -> bool;
/// Returns the address of an OpenGL function.
fn get_proc_address(&self, addr: &str) -> *const ();
/// Swaps the buffers in case of double or triple buffering.
///
/// You should call this function every time you have finished rendering, or the image
/// may not be displayed on the screen.
///
/// **Warning**: if you enabled vsync, this function will block until the next time the screen
/// is refreshed. However drivers can choose to override your vsync settings, which means that
/// you can't know in advance whether `swap_buffers` will block or not.
fn swap_buffers(&self) -> Result<(), ContextError>;
/// Returns the OpenGL API being used.
fn get_api(&self) -> Api;
/// Returns the pixel format of the main framebuffer of the context.
fn get_pixel_format(&self) -> PixelFormat;
}
/// Error that can happen while creating a window or a headless renderer.
#[derive(Debug)]
pub enum CreationError {
OsError(String),
/// TODO: remove this error
NotSupported,
NoBackendAvailable(Box<std::error::Error + Send>),
RobustnessNotSupported,
OpenGlVersionNotSupported,
NoAvailablePixelFormat,
}
impl CreationError {
fn to_string(&self) -> &str {
match *self {
CreationError::OsError(ref text) => &text,
CreationError::NotSupported => "Some of the requested attributes are not supported",
CreationError::NoBackendAvailable(_) => "No backend is available",
CreationError::RobustnessNotSupported => "You requested robustness, but it is \
not supported.",
CreationError::OpenGlVersionNotSupported => "The requested OpenGL version is not \
supported.",
CreationError::NoAvailablePixelFormat => "Couldn't find any pixel format that matches \
the criterias.",
}
}
}
impl std::fmt::Display for CreationError {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
formatter.write_str(self.to_string())?;
if let Some(err) = std::error::Error::cause(self) {
write!(formatter, ": {}", err)?;
}
Ok(())
}
}
impl std::error::Error for CreationError {
fn description(&self) -> &str {
self.to_string()
}
fn cause(&self) -> Option<&std::error::Error> {
match *self {
CreationError::NoBackendAvailable(ref err) => Some(&**err),
_ => None
}
}
}
/// Error that can happen when manipulating an OpenGL context.
#[derive(Debug)]
pub enum ContextError {
IoError(io::Error),
ContextLost,
}
impl ContextError {
fn to_string(&self) -> &str {
use std::error::Error;
match *self {
ContextError::IoError(ref err) => err.description(),
ContextError::ContextLost => "Context lost"
}
}
}
impl std::fmt::Display for ContextError {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
formatter.write_str(self.to_string())
}
}
impl std::error::Error for ContextError {
fn description(&self) -> &str {
self.to_string()
}
}
/// All APIs related to OpenGL that you can possibly get while using glutin.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Api {
/// The classical OpenGL. Available on Windows, Linux, OS/X.
OpenGl,
/// OpenGL embedded system. Available on Linux, Android.
OpenGlEs,
/// OpenGL for the web. Very similar to OpenGL ES.
WebGl,
}
/// Describes the requested OpenGL context profiles.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GlProfile {
/// Include all the immediate more functions and definitions.
Compatibility,
/// Include all the future-compatible functions and definitions.
Core,
}
/// Describes the OpenGL API and version that are being requested when a context is created.
#[derive(Debug, Copy, Clone)]
pub enum GlRequest {
/// Request the latest version of the "best" API of this platform.
///
/// On desktop, will try OpenGL.
Latest,
/// Request a specific version of a specific API.
///
/// Example: `GlRequest::Specific(Api::OpenGl, (3, 3))`.
Specific(Api, (u8, u8)),
/// If OpenGL is available, create an OpenGL context with the specified `opengl_version`.
/// Else if OpenGL ES or WebGL is available, create a context with the
/// specified `opengles_version`.
GlThenGles {
/// The version to use for OpenGL.
opengl_version: (u8, u8),
/// The version to use for OpenGL ES.
opengles_version: (u8, u8),
},
}
impl GlRequest {
/// Extract the desktop GL version, if any.
pub fn to_gl_version(&self) -> Option<(u8, u8)> {
match self {
&GlRequest::Specific(Api::OpenGl, version) => Some(version),
&GlRequest::GlThenGles { opengl_version: version,.. } => Some(version),
_ => None,
}
}
}
/// The minimum core profile GL context. Useful for getting the minimum
/// required GL version while still running on OSX, which often forbids
/// the compatibility profile features.
pub static GL_CORE: GlRequest = GlRequest::Specific(Api::OpenGl, (3, 2));
/// Specifies the tolerance of the OpenGL context to faults. If you accept raw OpenGL commands
/// and/or raw shader code from an untrusted source, you should definitely care about this.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Robustness {
/// Not everything is checked. Your application can crash if you do something wrong with your
/// shaders.
NotRobust,
/// The driver doesn't check anything. This option is very dangerous. Please know what you're
/// doing before using it. See the `GL_KHR_no_error` extension.
///
/// Since this option is purely an optimisation, no error will be returned if the backend
/// doesn't support it. Instead it will automatically fall back to `NotRobust`.
NoError,
/// Everything is checked to avoid any crash. The driver will attempt to avoid any problem,
/// but if a problem occurs the behavior is implementation-defined. You are just guaranteed not
/// to get a crash.
RobustNoResetNotification,
/// Same as `RobustNoResetNotification` but the context creation doesn't fail if it's not
/// supported.
TryRobustNoResetNotification,
/// Everything is checked to avoid any crash. If a problem occurs, the context will enter a
/// "context lost" state. It must then be recreated. For the moment, glutin doesn't provide a
/// way to recreate a context with the same window :-/
RobustLoseContextOnReset,
/// Same as `RobustLoseContextOnReset` but the context creation doesn't fail if it's not
/// supported.
TryRobustLoseContextOnReset,
}
/// The behavior of the driver when you change the current context.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ReleaseBehavior {
/// Doesn't do anything. Most notably doesn't flush.
None,
/// Flushes the context that was previously current as if `glFlush` was called.
Flush,
}
pub use winit::MouseCursor;
pub use winit::CursorState;
/// Describes a possible format. Unused.
#[allow(missing_docs)]
#[derive(Debug, Clone)]
pub struct PixelFormat {
pub hardware_accelerated: bool,
pub color_bits: u8,
pub alpha_bits: u8,
pub depth_bits: u8,
pub stencil_bits: u8,
pub stereoscopy: bool,
pub double_buffer: bool,
pub multisampling: Option<u16>,
pub srgb: bool,
}
/// Describes how the backend should choose a pixel format.
// TODO: swap method? (swap, copy)
#[derive(Clone, Debug)]
pub struct PixelFormatRequirements {
/// If true, only hardware-accelerated formats will be conisdered. If false, only software
/// renderers. `None` means "don't care". Default is `Some(true)`.
pub hardware_accelerated: Option<bool>,
/// Minimum number of bits for the color buffer, excluding alpha. `None` means "don't care".
/// The default is `Some(24)`.
pub color_bits: Option<u8>,
/// If true, the color buffer must be in a floating point format. Default is `false`.
///
/// Using floating points allows you to write values outside of the `[0.0, 1.0]` range.
pub float_color_buffer: bool,
/// Minimum number of bits for the alpha in the color buffer. `None` means "don't care".
/// The default is `Some(8)`.
pub alpha_bits: Option<u8>,
/// Minimum number of bits for the depth buffer. `None` means "don't care".
/// The default value is `Some(24)`.
pub depth_bits: Option<u8>,
/// Minimum number of bits for the depth buffer. `None` means "don't care".
/// The default value is `Some(8)`.
pub stencil_bits: Option<u8>,
/// If true, only double-buffered formats will be considered. If false, only single-buffer
/// formats. `None` means "don't care". The default is `Some(true)`.
pub double_buffer: Option<bool>,
/// Contains the minimum number of samples per pixel in the color, depth and stencil buffers.
/// `None` means "don't care". Default is `None`.
/// A value of `Some(0)` indicates that multisampling must not be enabled.
pub multisampling: Option<u16>,
/// If true, only stereoscopic formats will be considered. If false, only non-stereoscopic
/// formats. The default is `false`.
pub stereoscopy: bool,
/// If true, only sRGB-capable formats will be considered. If false, don't care.
/// The default is `false`.
pub srgb: bool,
/// The behavior when changing the current context. Default is `Flush`.
pub release_behavior: ReleaseBehavior,
}
impl Default for PixelFormatRequirements {
#[inline]
fn default() -> PixelFormatRequirements {
PixelFormatRequirements {
hardware_accelerated: Some(true),
color_bits: Some(24),
float_color_buffer: false,
alpha_bits: Some(8),
depth_bits: Some(24),
stencil_bits: Some(8),
double_buffer: None,
multisampling: None,
stereoscopy: false,
srgb: false,
release_behavior: ReleaseBehavior::Flush,
}
}
}
pub use winit::WindowAttributes; // TODO
/// Attributes to use when creating an OpenGL context.
#[derive(Clone)]
pub struct GlAttributes<S> {
/// An existing context to share the new the context with.
///
/// The default is `None`.
pub sharing: Option<S>,
/// Version to try create. See `GlRequest` for more infos.
///
/// The default is `Latest`.
pub version: GlRequest,
/// OpenGL profile to use.
///
/// The default is `None`.
pub profile: Option<GlProfile>,
/// Whether to enable the `debug` flag of the context.
///
/// Debug contexts are usually slower but give better error reporting.
///
/// The default is `true` in debug mode and `false` in release mode.
pub debug: bool,
/// How the OpenGL context should detect errors.
///
/// The default is `NotRobust` because this is what is typically expected when you create an
/// OpenGL context. However for safety you should consider `TryRobustLoseContextOnReset`.
pub robustness: Robustness,
| /// Whether to use vsync. If vsync is enabled, calling `swap_buffers` will block until the
/// screen refreshes. This is typically used to prevent screen tearing.
///
/// The default is `false`.
pub vsync: bool,
}
impl<S> GlAttributes<S> {
/// Turns the `sharing` parameter into another type by calling a closure.
#[inline]
pub fn map_sharing<F, T>(self, f: F) -> GlAttributes<T> where F: FnOnce(S) -> T {
GlAttributes {
sharing: self.sharing.map(f),
version: self.version,
profile: self.profile,
debug: self.debug,
robustness: self.robustness,
vsync: self.vsync,
}
}
}
impl<S> Default for GlAttributes<S> {
#[inline]
fn default() -> GlAttributes<S> {
GlAttributes {
sharing: None,
version: GlRequest::Latest,
profile: None,
debug: cfg!(debug_assertions),
robustness: Robustness::NotRobust,
vsync: false,
}
}
} | random_line_split |
|
generic-tuple-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, 0, 31868, 31868, 31868, 31868}, {Case1, 0, 2088533116, 2088533116}, {Case1, 0, 8970181431921507452}} | // gdb-command:print case2
// gdb-check:$2 = {{Case2, 0, 4369, 4369, 4369, 4369}, {Case2, 0, 286331153, 286331153}, {Case2, 0, 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, 0, 22873, 22873, 22873, 22873}, {Case3, 0, 1499027801, 1499027801}, {Case3, 0, 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {{-1}}
// NOTE: This is a copy of the non-generic test case. The `Txx` type parameters have to be
// substituted with something of size `xx` bits and the same alignment as an integer type of the
// same size.
// The first element is to ensure proper alignment, irrespective of the machines word size. Since
// the size of the discriminant value is machine dependent, this has be taken into account when
// datatype layout should be predictable as in this case.
enum Regular<T16, T32, T64> {
Case1(T64, T16, T16, T16, T16),
Case2(T64, T32, T32),
Case3(T64, T64)
}
enum Univariant<T64> {
TheOnlyCase(T64)
}
fn main() {
// In order to avoid endianess trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1: Regular<u16, u32, u64> = Case1(0_u64, 31868_u16, 31868_u16, 31868_u16, 31868_u16);
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2: Regular<i16, i32, i64> = Case2(0_i64, 286331153_i32, 286331153_i32);
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3: Regular<i16, i32, i64> = Case3(0_i64, 6438275382588823897_i64);
let univariant = TheOnlyCase(-1_i64);
zzz();
}
fn zzz() {()} | random_line_split |
|
generic-tuple-style-enum.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-command:set print union on
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print case1
// gdb-check:$1 = {{Case1, 0, 31868, 31868, 31868, 31868}, {Case1, 0, 2088533116, 2088533116}, {Case1, 0, 8970181431921507452}}
// gdb-command:print case2
// gdb-check:$2 = {{Case2, 0, 4369, 4369, 4369, 4369}, {Case2, 0, 286331153, 286331153}, {Case2, 0, 1229782938247303441}}
// gdb-command:print case3
// gdb-check:$3 = {{Case3, 0, 22873, 22873, 22873, 22873}, {Case3, 0, 1499027801, 1499027801}, {Case3, 0, 6438275382588823897}}
// gdb-command:print univariant
// gdb-check:$4 = {{-1}}
// NOTE: This is a copy of the non-generic test case. The `Txx` type parameters have to be
// substituted with something of size `xx` bits and the same alignment as an integer type of the
// same size.
// The first element is to ensure proper alignment, irrespective of the machines word size. Since
// the size of the discriminant value is machine dependent, this has be taken into account when
// datatype layout should be predictable as in this case.
enum Regular<T16, T32, T64> {
Case1(T64, T16, T16, T16, T16),
Case2(T64, T32, T32),
Case3(T64, T64)
}
enum Univariant<T64> {
TheOnlyCase(T64)
}
fn | () {
// In order to avoid endianess trouble all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1: Regular<u16, u32, u64> = Case1(0_u64, 31868_u16, 31868_u16, 31868_u16, 31868_u16);
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2: Regular<i16, i32, i64> = Case2(0_i64, 286331153_i32, 286331153_i32);
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3: Regular<i16, i32, i64> = Case3(0_i64, 6438275382588823897_i64);
let univariant = TheOnlyCase(-1_i64);
zzz();
}
fn zzz() {()}
| main | identifier_name |
lib.rs | //! Simple counting bloom filters.
#![license = "MIT"]
#![deny(missing_doc)]
extern crate rand;
use rand::Rng;
use std::hash::{hash,Hash};
use std::iter;
use std::num;
use std::uint;
/// A counting bloom filter.
///
/// A bloom filter is a probabilistic data structure which allows you to add and
/// remove elements from a set, query the set for whether it may contain an
/// element or definitely exclude it, and uses much less ram than an equivalent
/// hashtable.
#[deriving(Clone)]
pub struct BloomFilter {
buf: Vec<uint>,
number_of_insertions: uint,
}
// Here's where some of the magic numbers came from:
//
// m = number of elements in the filter
// n = size of the filter
// k = number of hash functions
//
// p = Pr[false positive] = 0.01 false positive rate
//
// if we have an estimation of the number of elements in the bloom filter, we
// know m.
//
// p = (1 - exp(-kn/m))^k
// k = (m/n)ln2
// lnp = -(m/n)(ln2)^2
// m = -nlnp/(ln2)^2
// => n = -m(ln2)^2/lnp
// ~= 10*m
//
// k = (m/n)ln2 = 10ln2 ~= 7
static NUMBER_OF_HASHES: uint = 7;
static BITS_PER_BUCKET: uint = 4;
static BUCKETS_PER_WORD: uint = uint::BITS / BITS_PER_BUCKET;
/// Returns a tuple of (array index, lsr shift amount) to get to the bits you
/// need. Don't forget to mask with 0xF!
fn bucket_index_to_array_index(bucket_index: uint) -> (uint, uint) {
let arr_index = bucket_index / BUCKETS_PER_WORD;
let shift_amount = (bucket_index % BUCKETS_PER_WORD) * BITS_PER_BUCKET;
(arr_index, shift_amount)
}
// Key Stretching
// ==============
//
// Siphash is expensive. Instead of running it `NUMBER_OF_HASHES`, which would
// be a pretty big hit on performance, we just use it to see a non-cryptographic
// random number generator. This stretches the hash to get us our
// `NUMBER_OF_HASHES` array indicies.
//
// A hash is a `u64` and comes from SipHash.
// A shash is a `uint` stretched hash which comes from the XorShiftRng.
fn to_rng(hash: u64) -> rand::XorShiftRng {
let bottom = (hash & 0xFFFFFFFF) as u32;
let top = ((hash >> 32) & 0xFFFFFFFF) as u32;
rand::SeedableRng::from_seed([ 0x97830e05, 0x113ba7bb, bottom, top ])
}
fn stretch<'a>(r: &'a mut rand::XorShiftRng)
-> iter::Take<rand::Generator<'a, uint, rand::XorShiftRng>> {
r.gen_iter().take(NUMBER_OF_HASHES)
}
impl BloomFilter {
/// This bloom filter is tuned to have ~1% false positive rate. In exchange
/// for this guarantee, you need to have a reasonable upper bound on the
/// number of elements that will ever be inserted into it. If you guess too
/// low, your false positive rate will suffer. If you guess too high, you'll
/// use more memory than is really necessary.
pub fn new(expected_number_of_insertions: uint) -> BloomFilter {
let size_in_buckets = 10 * expected_number_of_insertions;
let num_words =
num::checked_next_power_of_two(size_in_buckets / BUCKETS_PER_WORD)
.unwrap();
BloomFilter {
buf: Vec::from_elem(num_words, 0),
number_of_insertions: 0,
}
}
/// Since the array length must be a power of two, this will return a
/// bitmask that can be `&`ed with a number to bring it into the range of
/// the array.
fn mask(&self) -> uint {
(self.buf.len()*BUCKETS_PER_WORD) - 1 //guaranteed to be a power of two
}
/// Converts a stretched hash into a bucket index.
fn shash_to_bucket_index(&self, shash: uint) -> uint {
shash & self.mask()
}
/// Converts a stretched hash into an array and bit index. See the comment
/// on `bucket_index_to_array_index` for details about the return value.
fn shash_to_array_index(&self, shash: uint) -> (uint, uint) {
bucket_index_to_array_index(self.shash_to_bucket_index(shash))
}
/// Gets the value at a given bucket.
fn bucket_get(&self, a_idx: uint, shift_amount: uint) -> uint {
let array_val = self.buf[a_idx];
(array_val >> shift_amount) & 0xF
}
/// Sets the value at a given bucket. This will not bounds check, but that's
/// ok because you've called `bucket_get` first, anyhow.
fn bucket_set(&mut self, a_idx: uint, shift_amount: uint, new_val: uint) {
// We can avoid bounds checking here since in order to do a bucket_set
// we have to had done a `bucket_get` at the same index for it to make
// sense.
let old_val = self.buf.as_mut_slice().get_mut(a_idx).unwrap();
let mask = (1 << BITS_PER_BUCKET) - 1; // selects the right-most bucket
let select_in_bucket = mask << shift_amount; // selects the correct bucket
let select_out_of_bucket =!select_in_bucket; // selects everything except the correct bucket
let new_array_val = (new_val << shift_amount) // move the new_val into the right spot
| (*old_val & select_out_of_bucket); // mask out the old value, and or it with the new one
*old_val = new_array_val;
}
/// Insert a stretched hash into the bloom filter, remembering to saturate
/// the counter instead of overflowing.
fn insert_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
// saturate the count.
if b_val == 0xF {
return;
}
let new_val = b_val + 1;
self.bucket_set(a_idx, shift_amount, new_val);
}
/// Insert a hashed value into the bloom filter.
fn insert_hashed(&mut self, hash: u64) {
self.number_of_insertions += 1;
for h in stretch(&mut to_rng(hash)) {
self.insert_shash(h);
}
}
/// Inserts a value into the bloom filter. Note that the bloom filter isn't
/// parameterized over the values it holds. That's because it can hold
/// values of different types, as long as it can get a hash out of them.
pub fn insert<H: Hash>(&mut self, h: &H) {
self.insert_hashed(hash(h))
}
/// Removes a stretched hash from the bloom filter, taking care not to
/// decrememnt saturated counters.
///
/// It is an error to remove never-inserted elements.
fn remove_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
assert!(b_val!= 0, "Removing an element that was never inserted.");
// can't do anything if the counter saturated.
if b_val == 0xF { return; }
self.bucket_set(a_idx, shift_amount, b_val - 1);
}
/// Removes a hashed value from the bloom filter.
fn remove_hashed(&mut self, hash: u64) {
self.number_of_insertions -= 1;
for h in stretch(&mut to_rng(hash)) {
self.remove_shash(h);
}
}
/// Removes a value from the bloom filter.
///
/// Be careful of adding and removing lots of elements, especially for
/// long-lived bloom filters. The counters in each bucket will saturate if
/// 16 or more elements hash to it, and then stick there. This will hurt
/// your false positive rate. To fix this, you might consider refreshing the
/// bloom filter by `clear`ing it, and then reinserting elements at regular,
/// long intervals.
///
/// It is an error to remove never-inserted elements.
pub fn remove<H: Hash>(&mut self, h: &H) {
self.remove_hashed(hash(h))
}
/// Returns `true` if the bloom filter cannot possibly contain the given
/// stretched hash.
fn definitely_excludes_shash(&self, shash: uint) -> bool {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
self.bucket_get(a_idx, shift_amount) == 0
}
/// A hash is definitely excluded iff none of the stretched hashes are in
/// the bloom filter.
fn definitely_excludes_hashed(&self, hash: u64) -> bool {
let mut ret = false;
for shash in stretch(&mut to_rng(hash)) {
ret |= self.definitely_excludes_shash(shash);
}
ret
}
/// A bloom filter can tell you whether or not a value has definitely never
/// been inserted. Note that bloom filters can give false positives.
pub fn definitely_excludes<H: Hash>(&self, h: &H) -> bool {
self.definitely_excludes_hashed(hash(h))
}
/// A bloom filter can tell you if an element /may/ be in it. It cannot be
/// certain. But, assuming correct usage, this query will have a low false
// positive rate.
pub fn may_include<H: Hash>(&self, h: &H) -> bool {
!self.definitely_excludes(h)
}
/// Returns the number of elements ever inserted into the bloom filter - the
/// number of elements removed.
pub fn number_of_insertions(&self) -> uint {
self.number_of_insertions
}
/// Returns the number of bytes of memory the bloom filter uses.
pub fn size(&self) -> uint {
self.buf.len() * uint::BYTES
}
/// Removes all elements from the bloom filter. This is both more efficient
/// and has better false-positive properties than repeatedly calling `remove`
/// on every element.
pub fn clear(&mut self) {
self.number_of_insertions = 0;
for x in self.buf.as_mut_slice().mut_iter() {
*x = 0u;
}
}
}
#[test]
fn create_and_insert_some_stuff() | for i in range(0u, 100) {
bf.remove(&i);
}
assert_eq!(bf.number_of_insertions(), 900);
for i in range(100u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives = range(0u, 100).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 2); // 2%.
bf.clear();
assert_eq!(bf.number_of_insertions(), 0);
for i in range(0u, 2000) {
assert!(bf.definitely_excludes(&i));
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::hash::hash;
use std::iter;
use super::BloomFilter;
#[bench]
fn create_insert_1000_remove_100_lookup_100(b: &mut test::Bencher) {
b.iter(|| {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
for i in iter::range(0u, 100) {
bf.remove(&i);
}
for i in iter::range(100u, 200) {
test::black_box(bf.may_include(&i));
}
});
}
#[bench]
fn may_include(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
let mut i = 0u;
b.bench_n(1000, |b| {
b.iter(|| {
test::black_box(bf.may_include(&i));
i += 1;
});
});
}
#[bench]
fn insert(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
test::black_box(bf.insert(&i));
i += 1;
});
});
}
#[bench]
fn remove(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
bf.remove(&i);
i += 1;
});
});
test::black_box(bf.may_include(&0u));
}
#[bench]
fn hash_a_uint(b: &mut test::Bencher) {
let mut i = 0u;
b.iter(|| {
test::black_box(hash(&i));
i += 1;
})
}
}
| {
use std::iter::range;
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
assert_eq!(bf.number_of_insertions(), 1000);
for i in range(0u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives =
range(1001u, 2000).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 10) // 1%.
| identifier_body |
lib.rs | //! Simple counting bloom filters.
#![license = "MIT"]
#![deny(missing_doc)]
extern crate rand;
use rand::Rng;
use std::hash::{hash,Hash};
use std::iter;
use std::num;
use std::uint;
/// A counting bloom filter.
///
/// A bloom filter is a probabilistic data structure which allows you to add and
/// remove elements from a set, query the set for whether it may contain an
/// element or definitely exclude it, and uses much less ram than an equivalent
/// hashtable.
#[deriving(Clone)]
pub struct BloomFilter {
buf: Vec<uint>,
number_of_insertions: uint,
}
// Here's where some of the magic numbers came from:
//
// m = number of elements in the filter
// n = size of the filter
// k = number of hash functions
//
// p = Pr[false positive] = 0.01 false positive rate
//
// if we have an estimation of the number of elements in the bloom filter, we
// know m.
//
// p = (1 - exp(-kn/m))^k
// k = (m/n)ln2
// lnp = -(m/n)(ln2)^2
// m = -nlnp/(ln2)^2
// => n = -m(ln2)^2/lnp
// ~= 10*m
//
// k = (m/n)ln2 = 10ln2 ~= 7
static NUMBER_OF_HASHES: uint = 7;
static BITS_PER_BUCKET: uint = 4;
static BUCKETS_PER_WORD: uint = uint::BITS / BITS_PER_BUCKET;
/// Returns a tuple of (array index, lsr shift amount) to get to the bits you
/// need. Don't forget to mask with 0xF!
fn bucket_index_to_array_index(bucket_index: uint) -> (uint, uint) {
let arr_index = bucket_index / BUCKETS_PER_WORD;
let shift_amount = (bucket_index % BUCKETS_PER_WORD) * BITS_PER_BUCKET;
(arr_index, shift_amount)
}
// Key Stretching
// ==============
//
// Siphash is expensive. Instead of running it `NUMBER_OF_HASHES`, which would
// be a pretty big hit on performance, we just use it to see a non-cryptographic
// random number generator. This stretches the hash to get us our
// `NUMBER_OF_HASHES` array indicies.
//
// A hash is a `u64` and comes from SipHash.
// A shash is a `uint` stretched hash which comes from the XorShiftRng.
fn to_rng(hash: u64) -> rand::XorShiftRng {
let bottom = (hash & 0xFFFFFFFF) as u32;
let top = ((hash >> 32) & 0xFFFFFFFF) as u32;
rand::SeedableRng::from_seed([ 0x97830e05, 0x113ba7bb, bottom, top ])
}
fn stretch<'a>(r: &'a mut rand::XorShiftRng)
-> iter::Take<rand::Generator<'a, uint, rand::XorShiftRng>> {
r.gen_iter().take(NUMBER_OF_HASHES)
}
impl BloomFilter {
/// This bloom filter is tuned to have ~1% false positive rate. In exchange
/// for this guarantee, you need to have a reasonable upper bound on the
/// number of elements that will ever be inserted into it. If you guess too
/// low, your false positive rate will suffer. If you guess too high, you'll
/// use more memory than is really necessary.
pub fn new(expected_number_of_insertions: uint) -> BloomFilter {
let size_in_buckets = 10 * expected_number_of_insertions;
let num_words =
num::checked_next_power_of_two(size_in_buckets / BUCKETS_PER_WORD)
.unwrap();
BloomFilter {
buf: Vec::from_elem(num_words, 0),
number_of_insertions: 0,
}
}
/// Since the array length must be a power of two, this will return a
/// bitmask that can be `&`ed with a number to bring it into the range of
/// the array.
fn mask(&self) -> uint {
(self.buf.len()*BUCKETS_PER_WORD) - 1 //guaranteed to be a power of two
}
/// Converts a stretched hash into a bucket index.
fn shash_to_bucket_index(&self, shash: uint) -> uint {
shash & self.mask()
}
/// Converts a stretched hash into an array and bit index. See the comment
/// on `bucket_index_to_array_index` for details about the return value.
fn shash_to_array_index(&self, shash: uint) -> (uint, uint) {
bucket_index_to_array_index(self.shash_to_bucket_index(shash))
}
/// Gets the value at a given bucket.
fn bucket_get(&self, a_idx: uint, shift_amount: uint) -> uint {
let array_val = self.buf[a_idx];
(array_val >> shift_amount) & 0xF
}
/// Sets the value at a given bucket. This will not bounds check, but that's
/// ok because you've called `bucket_get` first, anyhow.
fn bucket_set(&mut self, a_idx: uint, shift_amount: uint, new_val: uint) {
// We can avoid bounds checking here since in order to do a bucket_set
// we have to had done a `bucket_get` at the same index for it to make
// sense.
let old_val = self.buf.as_mut_slice().get_mut(a_idx).unwrap();
let mask = (1 << BITS_PER_BUCKET) - 1; // selects the right-most bucket
let select_in_bucket = mask << shift_amount; // selects the correct bucket
let select_out_of_bucket =!select_in_bucket; // selects everything except the correct bucket
let new_array_val = (new_val << shift_amount) // move the new_val into the right spot
| (*old_val & select_out_of_bucket); // mask out the old value, and or it with the new one
*old_val = new_array_val;
}
/// Insert a stretched hash into the bloom filter, remembering to saturate
/// the counter instead of overflowing.
fn insert_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
// saturate the count.
if b_val == 0xF {
return;
}
let new_val = b_val + 1;
self.bucket_set(a_idx, shift_amount, new_val);
}
/// Insert a hashed value into the bloom filter.
fn insert_hashed(&mut self, hash: u64) {
self.number_of_insertions += 1;
for h in stretch(&mut to_rng(hash)) {
self.insert_shash(h);
}
}
/// Inserts a value into the bloom filter. Note that the bloom filter isn't
/// parameterized over the values it holds. That's because it can hold
/// values of different types, as long as it can get a hash out of them.
pub fn insert<H: Hash>(&mut self, h: &H) {
self.insert_hashed(hash(h))
}
/// Removes a stretched hash from the bloom filter, taking care not to
/// decrememnt saturated counters.
///
/// It is an error to remove never-inserted elements.
fn remove_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
assert!(b_val!= 0, "Removing an element that was never inserted.");
// can't do anything if the counter saturated.
if b_val == 0xF { return; }
self.bucket_set(a_idx, shift_amount, b_val - 1);
}
/// Removes a hashed value from the bloom filter.
fn remove_hashed(&mut self, hash: u64) {
self.number_of_insertions -= 1;
for h in stretch(&mut to_rng(hash)) {
self.remove_shash(h);
}
}
/// Removes a value from the bloom filter.
///
/// Be careful of adding and removing lots of elements, especially for
/// long-lived bloom filters. The counters in each bucket will saturate if
/// 16 or more elements hash to it, and then stick there. This will hurt
/// your false positive rate. To fix this, you might consider refreshing the
/// bloom filter by `clear`ing it, and then reinserting elements at regular,
/// long intervals.
///
/// It is an error to remove never-inserted elements.
pub fn remove<H: Hash>(&mut self, h: &H) {
self.remove_hashed(hash(h))
}
/// Returns `true` if the bloom filter cannot possibly contain the given
/// stretched hash.
fn | (&self, shash: uint) -> bool {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
self.bucket_get(a_idx, shift_amount) == 0
}
/// A hash is definitely excluded iff none of the stretched hashes are in
/// the bloom filter.
fn definitely_excludes_hashed(&self, hash: u64) -> bool {
let mut ret = false;
for shash in stretch(&mut to_rng(hash)) {
ret |= self.definitely_excludes_shash(shash);
}
ret
}
/// A bloom filter can tell you whether or not a value has definitely never
/// been inserted. Note that bloom filters can give false positives.
pub fn definitely_excludes<H: Hash>(&self, h: &H) -> bool {
self.definitely_excludes_hashed(hash(h))
}
/// A bloom filter can tell you if an element /may/ be in it. It cannot be
/// certain. But, assuming correct usage, this query will have a low false
// positive rate.
pub fn may_include<H: Hash>(&self, h: &H) -> bool {
!self.definitely_excludes(h)
}
/// Returns the number of elements ever inserted into the bloom filter - the
/// number of elements removed.
pub fn number_of_insertions(&self) -> uint {
self.number_of_insertions
}
/// Returns the number of bytes of memory the bloom filter uses.
pub fn size(&self) -> uint {
self.buf.len() * uint::BYTES
}
/// Removes all elements from the bloom filter. This is both more efficient
/// and has better false-positive properties than repeatedly calling `remove`
/// on every element.
pub fn clear(&mut self) {
self.number_of_insertions = 0;
for x in self.buf.as_mut_slice().mut_iter() {
*x = 0u;
}
}
}
#[test]
fn create_and_insert_some_stuff() {
use std::iter::range;
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
assert_eq!(bf.number_of_insertions(), 1000);
for i in range(0u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives =
range(1001u, 2000).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 10) // 1%.
for i in range(0u, 100) {
bf.remove(&i);
}
assert_eq!(bf.number_of_insertions(), 900);
for i in range(100u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives = range(0u, 100).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 2); // 2%.
bf.clear();
assert_eq!(bf.number_of_insertions(), 0);
for i in range(0u, 2000) {
assert!(bf.definitely_excludes(&i));
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::hash::hash;
use std::iter;
use super::BloomFilter;
#[bench]
fn create_insert_1000_remove_100_lookup_100(b: &mut test::Bencher) {
b.iter(|| {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
for i in iter::range(0u, 100) {
bf.remove(&i);
}
for i in iter::range(100u, 200) {
test::black_box(bf.may_include(&i));
}
});
}
#[bench]
fn may_include(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
let mut i = 0u;
b.bench_n(1000, |b| {
b.iter(|| {
test::black_box(bf.may_include(&i));
i += 1;
});
});
}
#[bench]
fn insert(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
test::black_box(bf.insert(&i));
i += 1;
});
});
}
#[bench]
fn remove(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
bf.remove(&i);
i += 1;
});
});
test::black_box(bf.may_include(&0u));
}
#[bench]
fn hash_a_uint(b: &mut test::Bencher) {
let mut i = 0u;
b.iter(|| {
test::black_box(hash(&i));
i += 1;
})
}
}
| definitely_excludes_shash | identifier_name |
lib.rs | //! Simple counting bloom filters.
#![license = "MIT"]
#![deny(missing_doc)]
extern crate rand;
use rand::Rng;
use std::hash::{hash,Hash};
use std::iter;
use std::num;
use std::uint;
/// A counting bloom filter.
///
/// A bloom filter is a probabilistic data structure which allows you to add and
/// remove elements from a set, query the set for whether it may contain an
/// element or definitely exclude it, and uses much less ram than an equivalent
/// hashtable.
#[deriving(Clone)]
pub struct BloomFilter {
buf: Vec<uint>,
number_of_insertions: uint,
}
// Here's where some of the magic numbers came from:
//
// m = number of elements in the filter
// n = size of the filter
// k = number of hash functions
//
// p = Pr[false positive] = 0.01 false positive rate
//
// if we have an estimation of the number of elements in the bloom filter, we
// know m.
//
// p = (1 - exp(-kn/m))^k
// k = (m/n)ln2
// lnp = -(m/n)(ln2)^2
// m = -nlnp/(ln2)^2
// => n = -m(ln2)^2/lnp
// ~= 10*m
//
// k = (m/n)ln2 = 10ln2 ~= 7
static NUMBER_OF_HASHES: uint = 7;
static BITS_PER_BUCKET: uint = 4;
static BUCKETS_PER_WORD: uint = uint::BITS / BITS_PER_BUCKET;
/// Returns a tuple of (array index, lsr shift amount) to get to the bits you
/// need. Don't forget to mask with 0xF!
fn bucket_index_to_array_index(bucket_index: uint) -> (uint, uint) {
let arr_index = bucket_index / BUCKETS_PER_WORD;
let shift_amount = (bucket_index % BUCKETS_PER_WORD) * BITS_PER_BUCKET;
(arr_index, shift_amount)
}
// Key Stretching
// ==============
//
// Siphash is expensive. Instead of running it `NUMBER_OF_HASHES`, which would
// be a pretty big hit on performance, we just use it to see a non-cryptographic
// random number generator. This stretches the hash to get us our
// `NUMBER_OF_HASHES` array indicies.
//
// A hash is a `u64` and comes from SipHash.
// A shash is a `uint` stretched hash which comes from the XorShiftRng.
fn to_rng(hash: u64) -> rand::XorShiftRng {
let bottom = (hash & 0xFFFFFFFF) as u32;
let top = ((hash >> 32) & 0xFFFFFFFF) as u32;
rand::SeedableRng::from_seed([ 0x97830e05, 0x113ba7bb, bottom, top ])
}
fn stretch<'a>(r: &'a mut rand::XorShiftRng)
-> iter::Take<rand::Generator<'a, uint, rand::XorShiftRng>> {
r.gen_iter().take(NUMBER_OF_HASHES)
}
impl BloomFilter {
/// This bloom filter is tuned to have ~1% false positive rate. In exchange
/// for this guarantee, you need to have a reasonable upper bound on the
/// number of elements that will ever be inserted into it. If you guess too
/// low, your false positive rate will suffer. If you guess too high, you'll
/// use more memory than is really necessary.
pub fn new(expected_number_of_insertions: uint) -> BloomFilter {
let size_in_buckets = 10 * expected_number_of_insertions;
let num_words =
num::checked_next_power_of_two(size_in_buckets / BUCKETS_PER_WORD)
.unwrap();
BloomFilter {
buf: Vec::from_elem(num_words, 0),
number_of_insertions: 0,
}
}
/// Since the array length must be a power of two, this will return a
/// bitmask that can be `&`ed with a number to bring it into the range of
/// the array.
fn mask(&self) -> uint {
(self.buf.len()*BUCKETS_PER_WORD) - 1 //guaranteed to be a power of two
}
/// Converts a stretched hash into a bucket index.
fn shash_to_bucket_index(&self, shash: uint) -> uint {
shash & self.mask()
}
/// Converts a stretched hash into an array and bit index. See the comment
/// on `bucket_index_to_array_index` for details about the return value.
fn shash_to_array_index(&self, shash: uint) -> (uint, uint) {
bucket_index_to_array_index(self.shash_to_bucket_index(shash))
}
/// Gets the value at a given bucket.
fn bucket_get(&self, a_idx: uint, shift_amount: uint) -> uint {
let array_val = self.buf[a_idx];
(array_val >> shift_amount) & 0xF
}
/// Sets the value at a given bucket. This will not bounds check, but that's
/// ok because you've called `bucket_get` first, anyhow.
fn bucket_set(&mut self, a_idx: uint, shift_amount: uint, new_val: uint) {
// We can avoid bounds checking here since in order to do a bucket_set
// we have to had done a `bucket_get` at the same index for it to make
// sense.
let old_val = self.buf.as_mut_slice().get_mut(a_idx).unwrap();
let mask = (1 << BITS_PER_BUCKET) - 1; // selects the right-most bucket
let select_in_bucket = mask << shift_amount; // selects the correct bucket
let select_out_of_bucket =!select_in_bucket; // selects everything except the correct bucket
let new_array_val = (new_val << shift_amount) // move the new_val into the right spot
| (*old_val & select_out_of_bucket); // mask out the old value, and or it with the new one
*old_val = new_array_val;
}
/// Insert a stretched hash into the bloom filter, remembering to saturate
/// the counter instead of overflowing.
fn insert_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
// saturate the count.
if b_val == 0xF {
return;
}
let new_val = b_val + 1;
self.bucket_set(a_idx, shift_amount, new_val);
}
/// Insert a hashed value into the bloom filter.
fn insert_hashed(&mut self, hash: u64) {
self.number_of_insertions += 1;
for h in stretch(&mut to_rng(hash)) {
self.insert_shash(h);
}
}
| self.insert_hashed(hash(h))
}
/// Removes a stretched hash from the bloom filter, taking care not to
/// decrememnt saturated counters.
///
/// It is an error to remove never-inserted elements.
fn remove_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
assert!(b_val!= 0, "Removing an element that was never inserted.");
// can't do anything if the counter saturated.
if b_val == 0xF { return; }
self.bucket_set(a_idx, shift_amount, b_val - 1);
}
/// Removes a hashed value from the bloom filter.
fn remove_hashed(&mut self, hash: u64) {
self.number_of_insertions -= 1;
for h in stretch(&mut to_rng(hash)) {
self.remove_shash(h);
}
}
/// Removes a value from the bloom filter.
///
/// Be careful of adding and removing lots of elements, especially for
/// long-lived bloom filters. The counters in each bucket will saturate if
/// 16 or more elements hash to it, and then stick there. This will hurt
/// your false positive rate. To fix this, you might consider refreshing the
/// bloom filter by `clear`ing it, and then reinserting elements at regular,
/// long intervals.
///
/// It is an error to remove never-inserted elements.
pub fn remove<H: Hash>(&mut self, h: &H) {
self.remove_hashed(hash(h))
}
/// Returns `true` if the bloom filter cannot possibly contain the given
/// stretched hash.
fn definitely_excludes_shash(&self, shash: uint) -> bool {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
self.bucket_get(a_idx, shift_amount) == 0
}
/// A hash is definitely excluded iff none of the stretched hashes are in
/// the bloom filter.
fn definitely_excludes_hashed(&self, hash: u64) -> bool {
let mut ret = false;
for shash in stretch(&mut to_rng(hash)) {
ret |= self.definitely_excludes_shash(shash);
}
ret
}
/// A bloom filter can tell you whether or not a value has definitely never
/// been inserted. Note that bloom filters can give false positives.
pub fn definitely_excludes<H: Hash>(&self, h: &H) -> bool {
self.definitely_excludes_hashed(hash(h))
}
/// A bloom filter can tell you if an element /may/ be in it. It cannot be
/// certain. But, assuming correct usage, this query will have a low false
// positive rate.
pub fn may_include<H: Hash>(&self, h: &H) -> bool {
!self.definitely_excludes(h)
}
/// Returns the number of elements ever inserted into the bloom filter - the
/// number of elements removed.
pub fn number_of_insertions(&self) -> uint {
self.number_of_insertions
}
/// Returns the number of bytes of memory the bloom filter uses.
pub fn size(&self) -> uint {
self.buf.len() * uint::BYTES
}
/// Removes all elements from the bloom filter. This is both more efficient
/// and has better false-positive properties than repeatedly calling `remove`
/// on every element.
pub fn clear(&mut self) {
self.number_of_insertions = 0;
for x in self.buf.as_mut_slice().mut_iter() {
*x = 0u;
}
}
}
#[test]
fn create_and_insert_some_stuff() {
use std::iter::range;
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
assert_eq!(bf.number_of_insertions(), 1000);
for i in range(0u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives =
range(1001u, 2000).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 10) // 1%.
for i in range(0u, 100) {
bf.remove(&i);
}
assert_eq!(bf.number_of_insertions(), 900);
for i in range(100u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives = range(0u, 100).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 2); // 2%.
bf.clear();
assert_eq!(bf.number_of_insertions(), 0);
for i in range(0u, 2000) {
assert!(bf.definitely_excludes(&i));
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::hash::hash;
use std::iter;
use super::BloomFilter;
#[bench]
fn create_insert_1000_remove_100_lookup_100(b: &mut test::Bencher) {
b.iter(|| {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
for i in iter::range(0u, 100) {
bf.remove(&i);
}
for i in iter::range(100u, 200) {
test::black_box(bf.may_include(&i));
}
});
}
#[bench]
fn may_include(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
let mut i = 0u;
b.bench_n(1000, |b| {
b.iter(|| {
test::black_box(bf.may_include(&i));
i += 1;
});
});
}
#[bench]
fn insert(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
test::black_box(bf.insert(&i));
i += 1;
});
});
}
#[bench]
fn remove(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
bf.remove(&i);
i += 1;
});
});
test::black_box(bf.may_include(&0u));
}
#[bench]
fn hash_a_uint(b: &mut test::Bencher) {
let mut i = 0u;
b.iter(|| {
test::black_box(hash(&i));
i += 1;
})
}
} | /// Inserts a value into the bloom filter. Note that the bloom filter isn't
/// parameterized over the values it holds. That's because it can hold
/// values of different types, as long as it can get a hash out of them.
pub fn insert<H: Hash>(&mut self, h: &H) { | random_line_split |
lib.rs | //! Simple counting bloom filters.
#![license = "MIT"]
#![deny(missing_doc)]
extern crate rand;
use rand::Rng;
use std::hash::{hash,Hash};
use std::iter;
use std::num;
use std::uint;
/// A counting bloom filter.
///
/// A bloom filter is a probabilistic data structure which allows you to add and
/// remove elements from a set, query the set for whether it may contain an
/// element or definitely exclude it, and uses much less ram than an equivalent
/// hashtable.
#[deriving(Clone)]
pub struct BloomFilter {
buf: Vec<uint>,
number_of_insertions: uint,
}
// Here's where some of the magic numbers came from:
//
// m = number of elements in the filter
// n = size of the filter
// k = number of hash functions
//
// p = Pr[false positive] = 0.01 false positive rate
//
// if we have an estimation of the number of elements in the bloom filter, we
// know m.
//
// p = (1 - exp(-kn/m))^k
// k = (m/n)ln2
// lnp = -(m/n)(ln2)^2
// m = -nlnp/(ln2)^2
// => n = -m(ln2)^2/lnp
// ~= 10*m
//
// k = (m/n)ln2 = 10ln2 ~= 7
static NUMBER_OF_HASHES: uint = 7;
static BITS_PER_BUCKET: uint = 4;
static BUCKETS_PER_WORD: uint = uint::BITS / BITS_PER_BUCKET;
/// Returns a tuple of (array index, lsr shift amount) to get to the bits you
/// need. Don't forget to mask with 0xF!
fn bucket_index_to_array_index(bucket_index: uint) -> (uint, uint) {
let arr_index = bucket_index / BUCKETS_PER_WORD;
let shift_amount = (bucket_index % BUCKETS_PER_WORD) * BITS_PER_BUCKET;
(arr_index, shift_amount)
}
// Key Stretching
// ==============
//
// Siphash is expensive. Instead of running it `NUMBER_OF_HASHES`, which would
// be a pretty big hit on performance, we just use it to see a non-cryptographic
// random number generator. This stretches the hash to get us our
// `NUMBER_OF_HASHES` array indicies.
//
// A hash is a `u64` and comes from SipHash.
// A shash is a `uint` stretched hash which comes from the XorShiftRng.
fn to_rng(hash: u64) -> rand::XorShiftRng {
let bottom = (hash & 0xFFFFFFFF) as u32;
let top = ((hash >> 32) & 0xFFFFFFFF) as u32;
rand::SeedableRng::from_seed([ 0x97830e05, 0x113ba7bb, bottom, top ])
}
fn stretch<'a>(r: &'a mut rand::XorShiftRng)
-> iter::Take<rand::Generator<'a, uint, rand::XorShiftRng>> {
r.gen_iter().take(NUMBER_OF_HASHES)
}
impl BloomFilter {
/// This bloom filter is tuned to have ~1% false positive rate. In exchange
/// for this guarantee, you need to have a reasonable upper bound on the
/// number of elements that will ever be inserted into it. If you guess too
/// low, your false positive rate will suffer. If you guess too high, you'll
/// use more memory than is really necessary.
pub fn new(expected_number_of_insertions: uint) -> BloomFilter {
let size_in_buckets = 10 * expected_number_of_insertions;
let num_words =
num::checked_next_power_of_two(size_in_buckets / BUCKETS_PER_WORD)
.unwrap();
BloomFilter {
buf: Vec::from_elem(num_words, 0),
number_of_insertions: 0,
}
}
/// Since the array length must be a power of two, this will return a
/// bitmask that can be `&`ed with a number to bring it into the range of
/// the array.
fn mask(&self) -> uint {
(self.buf.len()*BUCKETS_PER_WORD) - 1 //guaranteed to be a power of two
}
/// Converts a stretched hash into a bucket index.
fn shash_to_bucket_index(&self, shash: uint) -> uint {
shash & self.mask()
}
/// Converts a stretched hash into an array and bit index. See the comment
/// on `bucket_index_to_array_index` for details about the return value.
fn shash_to_array_index(&self, shash: uint) -> (uint, uint) {
bucket_index_to_array_index(self.shash_to_bucket_index(shash))
}
/// Gets the value at a given bucket.
fn bucket_get(&self, a_idx: uint, shift_amount: uint) -> uint {
let array_val = self.buf[a_idx];
(array_val >> shift_amount) & 0xF
}
/// Sets the value at a given bucket. This will not bounds check, but that's
/// ok because you've called `bucket_get` first, anyhow.
fn bucket_set(&mut self, a_idx: uint, shift_amount: uint, new_val: uint) {
// We can avoid bounds checking here since in order to do a bucket_set
// we have to had done a `bucket_get` at the same index for it to make
// sense.
let old_val = self.buf.as_mut_slice().get_mut(a_idx).unwrap();
let mask = (1 << BITS_PER_BUCKET) - 1; // selects the right-most bucket
let select_in_bucket = mask << shift_amount; // selects the correct bucket
let select_out_of_bucket =!select_in_bucket; // selects everything except the correct bucket
let new_array_val = (new_val << shift_amount) // move the new_val into the right spot
| (*old_val & select_out_of_bucket); // mask out the old value, and or it with the new one
*old_val = new_array_val;
}
/// Insert a stretched hash into the bloom filter, remembering to saturate
/// the counter instead of overflowing.
fn insert_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
// saturate the count.
if b_val == 0xF |
let new_val = b_val + 1;
self.bucket_set(a_idx, shift_amount, new_val);
}
/// Insert a hashed value into the bloom filter.
fn insert_hashed(&mut self, hash: u64) {
self.number_of_insertions += 1;
for h in stretch(&mut to_rng(hash)) {
self.insert_shash(h);
}
}
/// Inserts a value into the bloom filter. Note that the bloom filter isn't
/// parameterized over the values it holds. That's because it can hold
/// values of different types, as long as it can get a hash out of them.
pub fn insert<H: Hash>(&mut self, h: &H) {
self.insert_hashed(hash(h))
}
/// Removes a stretched hash from the bloom filter, taking care not to
/// decrememnt saturated counters.
///
/// It is an error to remove never-inserted elements.
fn remove_shash(&mut self, shash: uint) {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
let b_val = self.bucket_get(a_idx, shift_amount);
assert!(b_val!= 0, "Removing an element that was never inserted.");
// can't do anything if the counter saturated.
if b_val == 0xF { return; }
self.bucket_set(a_idx, shift_amount, b_val - 1);
}
/// Removes a hashed value from the bloom filter.
fn remove_hashed(&mut self, hash: u64) {
self.number_of_insertions -= 1;
for h in stretch(&mut to_rng(hash)) {
self.remove_shash(h);
}
}
/// Removes a value from the bloom filter.
///
/// Be careful of adding and removing lots of elements, especially for
/// long-lived bloom filters. The counters in each bucket will saturate if
/// 16 or more elements hash to it, and then stick there. This will hurt
/// your false positive rate. To fix this, you might consider refreshing the
/// bloom filter by `clear`ing it, and then reinserting elements at regular,
/// long intervals.
///
/// It is an error to remove never-inserted elements.
pub fn remove<H: Hash>(&mut self, h: &H) {
self.remove_hashed(hash(h))
}
/// Returns `true` if the bloom filter cannot possibly contain the given
/// stretched hash.
fn definitely_excludes_shash(&self, shash: uint) -> bool {
let (a_idx, shift_amount) = self.shash_to_array_index(shash);
self.bucket_get(a_idx, shift_amount) == 0
}
/// A hash is definitely excluded iff none of the stretched hashes are in
/// the bloom filter.
fn definitely_excludes_hashed(&self, hash: u64) -> bool {
let mut ret = false;
for shash in stretch(&mut to_rng(hash)) {
ret |= self.definitely_excludes_shash(shash);
}
ret
}
/// A bloom filter can tell you whether or not a value has definitely never
/// been inserted. Note that bloom filters can give false positives.
pub fn definitely_excludes<H: Hash>(&self, h: &H) -> bool {
self.definitely_excludes_hashed(hash(h))
}
/// A bloom filter can tell you if an element /may/ be in it. It cannot be
/// certain. But, assuming correct usage, this query will have a low false
// positive rate.
pub fn may_include<H: Hash>(&self, h: &H) -> bool {
!self.definitely_excludes(h)
}
/// Returns the number of elements ever inserted into the bloom filter - the
/// number of elements removed.
pub fn number_of_insertions(&self) -> uint {
self.number_of_insertions
}
/// Returns the number of bytes of memory the bloom filter uses.
pub fn size(&self) -> uint {
self.buf.len() * uint::BYTES
}
/// Removes all elements from the bloom filter. This is both more efficient
/// and has better false-positive properties than repeatedly calling `remove`
/// on every element.
pub fn clear(&mut self) {
self.number_of_insertions = 0;
for x in self.buf.as_mut_slice().mut_iter() {
*x = 0u;
}
}
}
#[test]
fn create_and_insert_some_stuff() {
use std::iter::range;
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
assert_eq!(bf.number_of_insertions(), 1000);
for i in range(0u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives =
range(1001u, 2000).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 10) // 1%.
for i in range(0u, 100) {
bf.remove(&i);
}
assert_eq!(bf.number_of_insertions(), 900);
for i in range(100u, 1000) {
assert!(bf.may_include(&i));
}
let false_positives = range(0u, 100).filter(|i| bf.may_include(&i)).count();
assert!(false_positives < 2); // 2%.
bf.clear();
assert_eq!(bf.number_of_insertions(), 0);
for i in range(0u, 2000) {
assert!(bf.definitely_excludes(&i));
}
}
#[cfg(test)]
mod bench {
extern crate test;
use std::hash::hash;
use std::iter;
use super::BloomFilter;
#[bench]
fn create_insert_1000_remove_100_lookup_100(b: &mut test::Bencher) {
b.iter(|| {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
for i in iter::range(0u, 100) {
bf.remove(&i);
}
for i in iter::range(100u, 200) {
test::black_box(bf.may_include(&i));
}
});
}
#[bench]
fn may_include(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in iter::range(0u, 1000) {
bf.insert(&i);
}
let mut i = 0u;
b.bench_n(1000, |b| {
b.iter(|| {
test::black_box(bf.may_include(&i));
i += 1;
});
});
}
#[bench]
fn insert(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
test::black_box(bf.insert(&i));
i += 1;
});
});
}
#[bench]
fn remove(b: &mut test::Bencher) {
let mut bf = BloomFilter::new(1000);
for i in range(0u, 1000) {
bf.insert(&i);
}
b.bench_n(1000, |b| {
let mut i = 0u;
b.iter(|| {
bf.remove(&i);
i += 1;
});
});
test::black_box(bf.may_include(&0u));
}
#[bench]
fn hash_a_uint(b: &mut test::Bencher) {
let mut i = 0u;
b.iter(|| {
test::black_box(hash(&i));
i += 1;
})
}
}
| {
return;
} | conditional_block |
hypercalls.rs |
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum Command {
set_trap_table = 0,
mmu_update = 1,
set_gdt = 2,
stack_switch = 3,
set_callbacks = 4,
fpu_taskswitch = 5,
sched_op_compat = 6,
platform_op = 7,
set_debugreg = 8,
get_debugreg = 9,
update_descriptor = 10,
memory_op = 12,
multicall = 13,
update_va_mapping = 14,
set_timer_op = 15,
event_channel_op_compat = 16,
xen_version = 17,
console_io = 18,
physdev_op_compat = 19,
grant_table_op = 20,
vm_assist = 21,
update_va_mapping_otherdomain = 22,
iret = 23,
vcpu_op = 24,
set_segment_base = 25,
mmuext_op = 26,
xsm_op = 27,
nmi_op = 28,
sched_op = 29,
callback_op = 30,
xenoprof_op = 31,
event_channel_op = 32,
physdev_op = 33,
hvm_op = 34,
sysctl = 35,
domctl = 36,
kexec_op = 37,
tmem_op = 38,
xc_reserved_op = 39,
xen_pmu_op = 40,
arch_0 = 48,
arch_1 = 49,
arch_2 = 50,
arch_3 = 51,
arch_4 = 52,
arch_5 = 53,
arch_6 = 54,
arch_7 = 55
}
//pub mod set_trap_table;
//pub mod mmu_update;
//pub mod set_gdt;
//pub mod stack_switch;
//pub mod set_callbacks;
//pub mod fpu_taskswitch;
//pub mod sched_op_compat;
//pub mod platform_op;
//pub mod set_debugreg;
//pub mod get_debugreg;
//pub mod update_descriptor;
//pub mod memory_op;
//pub mod multicall;
//pub mod update_va_mapping;
//pub mod set_timer_op;
//pub mod event_channel_op_compat;
//pub mod xen_version;
pub mod console_io {
use xen::ffi::arch::x86_64::hypercall;
use xen::ffi::hypercalls::Command;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum SubCommand {
write = 0,
read = 1
}
pub fn write(buf: &[u8]) {
hypercall(Command::console_io, SubCommand::write as usize, buf.len(), buf.as_ptr(), 0);
}
}
//pub mod physdev_op_compat;
pub mod grant_table_op {
use xen::ffi::MachineFrameNumber;
use xen::ffi::XenGuestHandle;
use xen::ffi::arch::x86_64::hypercall;
use xen::ffi::DomID;
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
map_grant_ref = 0,
unmap_grant_ref = 1,
setup_table = 2,
dump_table = 3,
tranfer = 4,
copy = 5,
query_size = 6,
unmap_and_replace = 7,
set_version = 8,
get_status_frames = 9,
get_version = 10,
swap_grant_ref = 11,
cache_flush = 12
}
//pub mod map_grant_ref;
//pub mod unmap_grant ref;
#[derive(Debug)]
#[repr(C)]
struct SetupTableArgs {
dom : DomID,
nr_frames : u32,
/// Output
status : i16,
/// Output
frame_list : XenGuestHandle<MachineFrameNumber<[u8; 1024]>>
}
/*
pub unsafe fn arch_init_gnttab(nr_grant_frames : u32) {
// TODO: FIX
let frames = [0u64; 16];
let mut args = SetupTableArgs {
dom: DomID::SELF,
nr_frames: nr_grant_frames,
status: 0,
frame_list: XenGuestHandle(PageFrameNumber(&frames[0] as *)) // OK because we know we have > 0 elements
};
let _result = hypercall!(
i64,
Command::grant_table_op,
SubCommand::setup_table,
&mut args as *mut SetupTableArgs,
16, // number of frames
1u32 // number of arguments: 1
);
//map_frames(frames) // TODO maybe -
}
*/
//pub mod dump_table;
//pub mod transfer;
//pub mod copy;
//pub mod query_size;
//pub mod unmap_and_replace;
//pub mod set_version;
//pub mod get_status_frames;
//pub mod get_version;
//pub mod swap_grant_ref;
//pub mod cache_flush;
}
//pub mod vm_assist;
//pub mod update_va_mapping_otherdomain;
//pub mod iret;
//pub mod vcpu_op;
//pub mod set_segment_base;
//pub mod mmuext_op;
//pub mod xsm_op;
//pub mod nmi_op;
pub mod sched_op {
use xen::ffi::hypercalls::Command;
use xen::ffi::arch::x86_64::hypercall;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
yield_ = 0,
block = 1,
shutdown = 2,
poll = 3,
remote_shutdown = 4,
shutdown_code = 5,
watchdog = 6
}
//pub mod yield_;
//pub mod block;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum ShutdownReason {
poweroff = 0,
reboot = 1,
suspend = 2,
crash = 3,
watchdog = 4
}
#[repr(C)]
#[derive(Debug)]
struct ShutdownArgs {
reason: ShutdownReason
}
pub fn shutdown(reason: ShutdownReason) ->! {
hypercall(
Command::sched_op,
SubCommand::shutdown as usize,
&ShutdownArgs {
reason: reason
} as *const ShutdownArgs as usize
);
loop {}
}
//pub mod poll;
//pub mod remote_shutdown;
//pub mod shutdown_code;
//pub mod watchdog;
}
//pub mod callback_op;
//pub mod xenoprof_op;
pub mod event_channel_op {
use xen::ffi::hypercalls::{Command, NegErrnoval};
use xen::ffi::{DomID, Port, Vcpu};
use xen::ffi::arch::x86_64::hypercall;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
bind_interdomain = 0,
bind_virq = 1,
bind_pirq = 2,
close = 3,
send = 4,
status = 5,
alloc_unbound = 6,
bind_ipi = 7,
bind_vcpu = 8,
unmask = 9,
reset = 10,
init_control = 11,
expand_array = 12,
set_priority = 13
}
#[derive(Debug)]
#[repr(C)]
struct BindInterdomainArgs {
remote_dom: DomID,
remote_port: Port,
/// Output
local_port: Port
}
#[derive(Debug)]
#[repr(C)]
struct BindVirqArgs {
virq: Virq,
cpu: Vcpu,
/// Output
port: Port
}
#[derive(Debug)]
#[repr(u32)]
enum Virq {
Timer = 0,
Debug = 1,
Console = 2,
DomExc = 3,
Tbuf = 4,
Debugger = 6,
Xenoprof = 7,
ConRing = 8,
PcpuState = 9,
MemEvent = 10,
XcReserved = 11,
Enomem = 12,
Xenpmu = 13,
Arch0 = 16,
Arch1 = 17,
Arch2 = 18,
Arch3 = 19,
Arch4 = 20,
Arch5 = 21,
Arch6 = 22,
Arch7 = 23
}
//pub mod bind_pirq;
pub fn close (p: Port) {
unsafe {
let mut args = CloseArgs { port: p };
let _result = hypercall(
Command::event_channel_op,
SubCommand::close as usize,
&mut args as *mut CloseArgs as usize
);
}
}
#[derive(Debug)]
#[repr(C)]
struct CloseArgs {
port: Port
}
pub fn | (port: &mut Port) -> NegErrnoval {
unsafe {
use core::mem;
use core::ptr;
let mut args: SendArgs = mem::uninitialized();
args.port = ptr::read(port);
hypercall(
Command::event_channel_op,
SubCommand::send as usize,
&mut args as *mut _ as usize
)
}
}
#[derive(Debug)]
#[repr(C)]
struct SendArgs {
port: Port
}
//pub mod status;
#[derive(Debug)]
#[repr(C)]
struct AllocUnboundArgs {
dom: DomID,
remote_dom: DomID,
/// Output
port: Port
}
//pub mod bind_ipi;
//pub mod bind_vcpu;
//pub mod unmask;
//pub mod reset;
//pub mod init_control;
//pub mod expand_array;
//pub mod set_priority;
}
//pub mod physdev_op;
//pub mod hvm_op;
//pub mod sysctl;
//pub mod domctl;
//pub mod kexec_op;
//pub mod tmem_op;
//pub mod xc_reserved_op;
//pub mod xen_pmu_op;
//pub mod arch_0;
//pub mod arch_1;
//pub mod arch_2;
//pub mod arch_3;
//pub mod arch_4;
//pub mod arch_5;
//pub mod arch_6;
//pub mod arch_7;
#[repr(i64)]
#[derive(Debug, Clone, PartialEq, Copy)]
pub enum NegErrnoval {
ALLGOOD = 0,
EPERM = -1,
ENOENT = -2,
ESRCH = -3,
EINTR = -4,
EIO = -5,
ENXIO = -6,
E2BIG = -7,
ENOEXEC = -8,
EBADF = -9,
ECHILD = -10,
EAGAIN = -11,
ENOMEM = -12,
EACCES = -13,
EFAULT = -14,
ENOTBLK = -15,
EBUSY = -16,
EEXIST = -17,
EXDEV = -18,
ENODEV = -19,
ENOTDIR = -20,
EISDIR = -21,
EINVAL = -22,
ENFILE = -23,
EMFILE = -24,
ENOTTY = -25,
ETXTBSY = -26,
EFBIG = -27,
ENOSPC = -28,
ESPIPE = -29,
EROFS = -30,
EMLINK = -31,
EPIPE = -32,
EDOM = -33,
ERANGE = -34,
EDEADLK = -35,
ENAMETOOLONG = -36,
ENOLCK = -37,
ENOSYS = -38,
ENOTEMPTY = -39,
ELOOP = -40,
ENOMSG = -42,
EIDRM = -43,
ECHRNG = -44,
EL2NSYNC = -45,
EL3HLT = -46,
EL3RST = -47,
ELNRNG = -48,
EUNATCH = -49,
ENOCSI = -50,
EL2HLT = -51,
EBADE = -52,
EBADR = -53,
EXFULL = -54,
ENOANO = -55,
EBADRQC = -56,
EBADSLT = -57,
EBFONT = -59,
ENOSTR = -60,
ENODATA = -61,
ETIME = -62,
ENOSR = -63,
ENONET = -64,
ENOPKG = -65,
EREMOTE = -66,
ENOLINK = -67,
EADV = -68,
ESRMNT = -69,
ECOMM = -70,
EPROTO = -71,
EMULTIHOP = -72,
EDOTDOT = -73,
EBADMSG = -74,
EOVERFLOW = -75,
ENOTUNIQ = -76,
EBADFD = -77,
EREMCHG = -78,
ELIBACC = -79,
ELIBBAD = -80,
ELIBSCN = -81,
ELIBMAX = -82,
ELIBEXEC = -83,
EILSEQ = -84,
ERESTART = -85,
ESTRPIPE = -86,
EUSERS = -87,
ENOTSOCK = -88,
EDESTADDRREQ = -89,
EMSGSIZE = -90,
EPROTOTYPE = -91,
ENOPROTOOPT = -92,
EPROTONOSUPPORT = -93,
ESOCKTNOSUPPORT = -94,
EOPNOTSUPP = -95,
EPFNOSUPPORT = -96,
EAFNOSUPPORT = -97,
EADDRINUSE = -98,
EADDRNOTAVAIL = -99,
ENETDOWN = -100,
ENETUNREACH = -101,
ENETRESET = -102,
ECONNABORTED = -103,
ECONNRESET = -104,
ENOBUFS = -105,
EISCONN = -106,
ENOTCONN = -107,
ESHUTDOWN = -108,
ETOOMANYREFS = -109,
ETIMEDOUT = -110,
ECONNREFUSED = -111,
EHOSTDOWN = -112,
EHOSTUNREACH = -113,
EALREADY = -114,
EINPROGRESS = -115,
ESTALE = -116,
EUCLEAN = -117,
ENOTNAM = -118,
ENAVAIL = -119,
EISNAM = -120,
EREMOTEIO = -121,
EDQUOT = -122,
ENOMEDIUM = -123,
EMEDIUMTYPE = -124,
ECANCELED = -125,
ENOKEY = -126,
EKEYEXPIRED = -127,
EKEYREVOKED = -128,
EKEYREJECTED = -129,
EOWNERDEAD = -130,
ENOTRECOVERABLE = -131,
ERFKILL = -132,
EHWPOISON = -133,
}
| send | identifier_name |
hypercalls.rs | #[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum Command {
set_trap_table = 0,
mmu_update = 1,
set_gdt = 2,
stack_switch = 3,
set_callbacks = 4,
fpu_taskswitch = 5,
sched_op_compat = 6,
platform_op = 7,
set_debugreg = 8,
get_debugreg = 9,
update_descriptor = 10,
memory_op = 12,
multicall = 13,
update_va_mapping = 14,
set_timer_op = 15,
event_channel_op_compat = 16,
xen_version = 17,
console_io = 18,
physdev_op_compat = 19,
grant_table_op = 20,
vm_assist = 21,
update_va_mapping_otherdomain = 22,
iret = 23,
vcpu_op = 24,
set_segment_base = 25,
mmuext_op = 26,
xsm_op = 27,
nmi_op = 28,
sched_op = 29,
callback_op = 30,
xenoprof_op = 31, | domctl = 36,
kexec_op = 37,
tmem_op = 38,
xc_reserved_op = 39,
xen_pmu_op = 40,
arch_0 = 48,
arch_1 = 49,
arch_2 = 50,
arch_3 = 51,
arch_4 = 52,
arch_5 = 53,
arch_6 = 54,
arch_7 = 55
}
//pub mod set_trap_table;
//pub mod mmu_update;
//pub mod set_gdt;
//pub mod stack_switch;
//pub mod set_callbacks;
//pub mod fpu_taskswitch;
//pub mod sched_op_compat;
//pub mod platform_op;
//pub mod set_debugreg;
//pub mod get_debugreg;
//pub mod update_descriptor;
//pub mod memory_op;
//pub mod multicall;
//pub mod update_va_mapping;
//pub mod set_timer_op;
//pub mod event_channel_op_compat;
//pub mod xen_version;
pub mod console_io {
use xen::ffi::arch::x86_64::hypercall;
use xen::ffi::hypercalls::Command;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum SubCommand {
write = 0,
read = 1
}
pub fn write(buf: &[u8]) {
hypercall(Command::console_io, SubCommand::write as usize, buf.len(), buf.as_ptr(), 0);
}
}
//pub mod physdev_op_compat;
pub mod grant_table_op {
use xen::ffi::MachineFrameNumber;
use xen::ffi::XenGuestHandle;
use xen::ffi::arch::x86_64::hypercall;
use xen::ffi::DomID;
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
map_grant_ref = 0,
unmap_grant_ref = 1,
setup_table = 2,
dump_table = 3,
tranfer = 4,
copy = 5,
query_size = 6,
unmap_and_replace = 7,
set_version = 8,
get_status_frames = 9,
get_version = 10,
swap_grant_ref = 11,
cache_flush = 12
}
//pub mod map_grant_ref;
//pub mod unmap_grant ref;
#[derive(Debug)]
#[repr(C)]
struct SetupTableArgs {
dom : DomID,
nr_frames : u32,
/// Output
status : i16,
/// Output
frame_list : XenGuestHandle<MachineFrameNumber<[u8; 1024]>>
}
/*
pub unsafe fn arch_init_gnttab(nr_grant_frames : u32) {
// TODO: FIX
let frames = [0u64; 16];
let mut args = SetupTableArgs {
dom: DomID::SELF,
nr_frames: nr_grant_frames,
status: 0,
frame_list: XenGuestHandle(PageFrameNumber(&frames[0] as *)) // OK because we know we have > 0 elements
};
let _result = hypercall!(
i64,
Command::grant_table_op,
SubCommand::setup_table,
&mut args as *mut SetupTableArgs,
16, // number of frames
1u32 // number of arguments: 1
);
//map_frames(frames) // TODO maybe -
}
*/
//pub mod dump_table;
//pub mod transfer;
//pub mod copy;
//pub mod query_size;
//pub mod unmap_and_replace;
//pub mod set_version;
//pub mod get_status_frames;
//pub mod get_version;
//pub mod swap_grant_ref;
//pub mod cache_flush;
}
//pub mod vm_assist;
//pub mod update_va_mapping_otherdomain;
//pub mod iret;
//pub mod vcpu_op;
//pub mod set_segment_base;
//pub mod mmuext_op;
//pub mod xsm_op;
//pub mod nmi_op;
pub mod sched_op {
use xen::ffi::hypercalls::Command;
use xen::ffi::arch::x86_64::hypercall;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
yield_ = 0,
block = 1,
shutdown = 2,
poll = 3,
remote_shutdown = 4,
shutdown_code = 5,
watchdog = 6
}
//pub mod yield_;
//pub mod block;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
pub enum ShutdownReason {
poweroff = 0,
reboot = 1,
suspend = 2,
crash = 3,
watchdog = 4
}
#[repr(C)]
#[derive(Debug)]
struct ShutdownArgs {
reason: ShutdownReason
}
pub fn shutdown(reason: ShutdownReason) ->! {
hypercall(
Command::sched_op,
SubCommand::shutdown as usize,
&ShutdownArgs {
reason: reason
} as *const ShutdownArgs as usize
);
loop {}
}
//pub mod poll;
//pub mod remote_shutdown;
//pub mod shutdown_code;
//pub mod watchdog;
}
//pub mod callback_op;
//pub mod xenoprof_op;
pub mod event_channel_op {
use xen::ffi::hypercalls::{Command, NegErrnoval};
use xen::ffi::{DomID, Port, Vcpu};
use xen::ffi::arch::x86_64::hypercall;
#[derive(Debug)]
#[repr(usize)]
#[allow(non_camel_case_types)]
enum SubCommand {
bind_interdomain = 0,
bind_virq = 1,
bind_pirq = 2,
close = 3,
send = 4,
status = 5,
alloc_unbound = 6,
bind_ipi = 7,
bind_vcpu = 8,
unmask = 9,
reset = 10,
init_control = 11,
expand_array = 12,
set_priority = 13
}
#[derive(Debug)]
#[repr(C)]
struct BindInterdomainArgs {
remote_dom: DomID,
remote_port: Port,
/// Output
local_port: Port
}
#[derive(Debug)]
#[repr(C)]
struct BindVirqArgs {
virq: Virq,
cpu: Vcpu,
/// Output
port: Port
}
#[derive(Debug)]
#[repr(u32)]
enum Virq {
Timer = 0,
Debug = 1,
Console = 2,
DomExc = 3,
Tbuf = 4,
Debugger = 6,
Xenoprof = 7,
ConRing = 8,
PcpuState = 9,
MemEvent = 10,
XcReserved = 11,
Enomem = 12,
Xenpmu = 13,
Arch0 = 16,
Arch1 = 17,
Arch2 = 18,
Arch3 = 19,
Arch4 = 20,
Arch5 = 21,
Arch6 = 22,
Arch7 = 23
}
//pub mod bind_pirq;
pub fn close (p: Port) {
unsafe {
let mut args = CloseArgs { port: p };
let _result = hypercall(
Command::event_channel_op,
SubCommand::close as usize,
&mut args as *mut CloseArgs as usize
);
}
}
#[derive(Debug)]
#[repr(C)]
struct CloseArgs {
port: Port
}
pub fn send(port: &mut Port) -> NegErrnoval {
unsafe {
use core::mem;
use core::ptr;
let mut args: SendArgs = mem::uninitialized();
args.port = ptr::read(port);
hypercall(
Command::event_channel_op,
SubCommand::send as usize,
&mut args as *mut _ as usize
)
}
}
#[derive(Debug)]
#[repr(C)]
struct SendArgs {
port: Port
}
//pub mod status;
#[derive(Debug)]
#[repr(C)]
struct AllocUnboundArgs {
dom: DomID,
remote_dom: DomID,
/// Output
port: Port
}
//pub mod bind_ipi;
//pub mod bind_vcpu;
//pub mod unmask;
//pub mod reset;
//pub mod init_control;
//pub mod expand_array;
//pub mod set_priority;
}
//pub mod physdev_op;
//pub mod hvm_op;
//pub mod sysctl;
//pub mod domctl;
//pub mod kexec_op;
//pub mod tmem_op;
//pub mod xc_reserved_op;
//pub mod xen_pmu_op;
//pub mod arch_0;
//pub mod arch_1;
//pub mod arch_2;
//pub mod arch_3;
//pub mod arch_4;
//pub mod arch_5;
//pub mod arch_6;
//pub mod arch_7;
#[repr(i64)]
#[derive(Debug, Clone, PartialEq, Copy)]
pub enum NegErrnoval {
ALLGOOD = 0,
EPERM = -1,
ENOENT = -2,
ESRCH = -3,
EINTR = -4,
EIO = -5,
ENXIO = -6,
E2BIG = -7,
ENOEXEC = -8,
EBADF = -9,
ECHILD = -10,
EAGAIN = -11,
ENOMEM = -12,
EACCES = -13,
EFAULT = -14,
ENOTBLK = -15,
EBUSY = -16,
EEXIST = -17,
EXDEV = -18,
ENODEV = -19,
ENOTDIR = -20,
EISDIR = -21,
EINVAL = -22,
ENFILE = -23,
EMFILE = -24,
ENOTTY = -25,
ETXTBSY = -26,
EFBIG = -27,
ENOSPC = -28,
ESPIPE = -29,
EROFS = -30,
EMLINK = -31,
EPIPE = -32,
EDOM = -33,
ERANGE = -34,
EDEADLK = -35,
ENAMETOOLONG = -36,
ENOLCK = -37,
ENOSYS = -38,
ENOTEMPTY = -39,
ELOOP = -40,
ENOMSG = -42,
EIDRM = -43,
ECHRNG = -44,
EL2NSYNC = -45,
EL3HLT = -46,
EL3RST = -47,
ELNRNG = -48,
EUNATCH = -49,
ENOCSI = -50,
EL2HLT = -51,
EBADE = -52,
EBADR = -53,
EXFULL = -54,
ENOANO = -55,
EBADRQC = -56,
EBADSLT = -57,
EBFONT = -59,
ENOSTR = -60,
ENODATA = -61,
ETIME = -62,
ENOSR = -63,
ENONET = -64,
ENOPKG = -65,
EREMOTE = -66,
ENOLINK = -67,
EADV = -68,
ESRMNT = -69,
ECOMM = -70,
EPROTO = -71,
EMULTIHOP = -72,
EDOTDOT = -73,
EBADMSG = -74,
EOVERFLOW = -75,
ENOTUNIQ = -76,
EBADFD = -77,
EREMCHG = -78,
ELIBACC = -79,
ELIBBAD = -80,
ELIBSCN = -81,
ELIBMAX = -82,
ELIBEXEC = -83,
EILSEQ = -84,
ERESTART = -85,
ESTRPIPE = -86,
EUSERS = -87,
ENOTSOCK = -88,
EDESTADDRREQ = -89,
EMSGSIZE = -90,
EPROTOTYPE = -91,
ENOPROTOOPT = -92,
EPROTONOSUPPORT = -93,
ESOCKTNOSUPPORT = -94,
EOPNOTSUPP = -95,
EPFNOSUPPORT = -96,
EAFNOSUPPORT = -97,
EADDRINUSE = -98,
EADDRNOTAVAIL = -99,
ENETDOWN = -100,
ENETUNREACH = -101,
ENETRESET = -102,
ECONNABORTED = -103,
ECONNRESET = -104,
ENOBUFS = -105,
EISCONN = -106,
ENOTCONN = -107,
ESHUTDOWN = -108,
ETOOMANYREFS = -109,
ETIMEDOUT = -110,
ECONNREFUSED = -111,
EHOSTDOWN = -112,
EHOSTUNREACH = -113,
EALREADY = -114,
EINPROGRESS = -115,
ESTALE = -116,
EUCLEAN = -117,
ENOTNAM = -118,
ENAVAIL = -119,
EISNAM = -120,
EREMOTEIO = -121,
EDQUOT = -122,
ENOMEDIUM = -123,
EMEDIUMTYPE = -124,
ECANCELED = -125,
ENOKEY = -126,
EKEYEXPIRED = -127,
EKEYREVOKED = -128,
EKEYREJECTED = -129,
EOWNERDEAD = -130,
ENOTRECOVERABLE = -131,
ERFKILL = -132,
EHWPOISON = -133,
} | event_channel_op = 32,
physdev_op = 33,
hvm_op = 34,
sysctl = 35, | random_line_split |
rlp.rs | // The purpose of RLP is to {en|de}code arbitrarily nested arrays of binary data
// specs: https://github.com/ethereum/wiki/wiki/RLP
use std::num::Int;
use std::io::BufReader;
use std::io::extensions::u64_to_be_bytes;
use self::RlpEncodable::{Binary, List};
#[deriving(PartialEq, Eq, Show)]
pub enum RlpEncodable {
Binary(Vec<u8>),
List(Vec<RlpEncodable>)
}
#[deriving(PartialEq, Eq, Show)]
pub struct RlpDecodable(Vec<u8>);
pub trait Rlpable {
fn encode(self) -> RlpEncodable;
fn decode(from: RlpDecodable) -> Self;
}
static BINARY_OFFSET: u8 = 128;
static LIST_OFFSET: u8 = 192;
static LENGTH_RANGE: u8 = 55;
impl RlpEncodable {
pub fn encode(self) -> RlpDecodable {
RlpDecodable(
match self {
Binary(v) => {
if v.len() == 1 && v[0] < BINARY_OFFSET { v }
else { RlpEncodable::encode_next_length(v.len(), BINARY_OFFSET) + v }
},
List(v) => {
let mut data:Vec<u8> = Vec::new();
for item in v.into_iter() {
data.push_all(item.encode().to_vec().as_slice());
}
RlpEncodable::encode_next_length(data.len(), LIST_OFFSET) + data
}
}
)
}
fn encode_next_length(length: uint, offset: u8) -> Vec<u8> {
if length <= LENGTH_RANGE as uint {
return vec![length as u8 + offset];
}
for length_of_length in range(0u, 8) {
if length < 32u.pow(length_of_length + 1) {
let mut data = vec![length_of_length as u8 + offset + LENGTH_RANGE];
u64_to_be_bytes(length as u64, length_of_length, |v| data.push_all(v));
return data;
}
}
panic!()
}
}
impl RlpDecodable {
pub fn new(vec: Vec<u8>) -> RlpDecodable {
RlpDecodable(vec)
}
pub fn to_vec(self) -> Vec<u8> {
let RlpDecodable(vec) = self;
vec
}
pub fn decode(self) -> RlpEncodable {
let vec = self.to_vec();
let mut reader = BufReader::new(vec.as_slice());
RlpDecodable::decode_with_bufreader(&mut reader)
}
fn decode_with_bufreader(reader: &mut BufReader) -> RlpEncodable {
match reader.read_byte() {
Ok(byte) if byte < BINARY_OFFSET => {
Binary(vec![byte])
},
Ok(byte) if byte < LIST_OFFSET => {
let length = RlpDecodable::decode_next_length(reader, byte, BINARY_OFFSET);
Binary(reader.read_exact(length).unwrap())
},
Ok(byte) => {
let mut res:Vec<RlpEncodable> = Vec::new();
let length = RlpDecodable::decode_next_length(reader, byte, LIST_OFFSET);
let initial_pos = reader.tell().unwrap() as uint;
while (reader.tell().unwrap() as uint) < initial_pos + length {
res.push(RlpDecodable::decode_with_bufreader(reader));
}
List(res)
}
Err(_) => {
panic!()
},
}
}
fn decode_next_length(reader: &mut BufReader, byte:u8, offset: u8) -> uint {
if byte <= (offset + LENGTH_RANGE) {
(byte - offset) as uint
} else {
let length_of_length = (byte - offset - LENGTH_RANGE) as uint;
reader.read_be_uint_n(length_of_length).unwrap() as uint
}
}
}
#[cfg(test)]
mod tests {
use std::vec;
use super::{RlpEncodable, RlpDecodable};
use super::RlpEncodable::{Binary, List};
macro_rules! s(($s:expr) => (Binary(String::from_str($s).into_bytes())))
macro_rules! l(($($e:expr),*) => (List(vec!($($e),*))))
fn generate_pairs() -> vec::MoveItems<(RlpEncodable, RlpDecodable)> |
#[test]
fn rlp_encodage() {
for (a, b) in generate_pairs() {
assert!(a.encode() == b);
}
}
#[test]
fn rlp_decodage() {
for (b, a) in generate_pairs() {
assert!(a.decode() == b);
}
}
}
| {
let lorem = "Lorem ipsum dolor sit amet, consectetur adipisicing elit";
(vec![
(s!(""), RlpDecodable(vec![0x80])),
(s!("\x0f"), RlpDecodable(vec![0x0f])),
(s!("\x04\x00"), RlpDecodable(vec![0x82, 0x04, 0x00])),
(s!("dog"), RlpDecodable(vec![0x83, 0x64, 0x6f, 0x67])),
(l![], RlpDecodable(vec![0xc0])),
(l![s!("cat"), s!("dog")], RlpDecodable(vec![0xc8, 0x83, 0x63, 0x61, 0x74, 0x83, 0x64, 0x6f, 0x67])),
(s!(lorem), RlpDecodable(vec![0xb8, 0x38] + String::from_str(lorem).into_bytes())),
(
l![l![], l![l![]], l![l![], l![l![]]]],
RlpDecodable(vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0])
),
]).into_iter()
} | identifier_body |
rlp.rs | // The purpose of RLP is to {en|de}code arbitrarily nested arrays of binary data
// specs: https://github.com/ethereum/wiki/wiki/RLP
use std::num::Int;
use std::io::BufReader;
use std::io::extensions::u64_to_be_bytes;
use self::RlpEncodable::{Binary, List};
#[deriving(PartialEq, Eq, Show)]
pub enum RlpEncodable {
Binary(Vec<u8>),
List(Vec<RlpEncodable>)
}
#[deriving(PartialEq, Eq, Show)]
pub struct RlpDecodable(Vec<u8>);
pub trait Rlpable {
fn encode(self) -> RlpEncodable;
fn decode(from: RlpDecodable) -> Self;
}
static BINARY_OFFSET: u8 = 128;
static LIST_OFFSET: u8 = 192;
static LENGTH_RANGE: u8 = 55;
impl RlpEncodable {
pub fn encode(self) -> RlpDecodable {
RlpDecodable(
match self {
Binary(v) => {
if v.len() == 1 && v[0] < BINARY_OFFSET { v }
else { RlpEncodable::encode_next_length(v.len(), BINARY_OFFSET) + v }
},
List(v) => {
let mut data:Vec<u8> = Vec::new();
for item in v.into_iter() {
data.push_all(item.encode().to_vec().as_slice());
}
RlpEncodable::encode_next_length(data.len(), LIST_OFFSET) + data
}
}
)
}
fn encode_next_length(length: uint, offset: u8) -> Vec<u8> {
if length <= LENGTH_RANGE as uint {
return vec![length as u8 + offset];
}
for length_of_length in range(0u, 8) { | u64_to_be_bytes(length as u64, length_of_length, |v| data.push_all(v));
return data;
}
}
panic!()
}
}
impl RlpDecodable {
pub fn new(vec: Vec<u8>) -> RlpDecodable {
RlpDecodable(vec)
}
pub fn to_vec(self) -> Vec<u8> {
let RlpDecodable(vec) = self;
vec
}
pub fn decode(self) -> RlpEncodable {
let vec = self.to_vec();
let mut reader = BufReader::new(vec.as_slice());
RlpDecodable::decode_with_bufreader(&mut reader)
}
fn decode_with_bufreader(reader: &mut BufReader) -> RlpEncodable {
match reader.read_byte() {
Ok(byte) if byte < BINARY_OFFSET => {
Binary(vec![byte])
},
Ok(byte) if byte < LIST_OFFSET => {
let length = RlpDecodable::decode_next_length(reader, byte, BINARY_OFFSET);
Binary(reader.read_exact(length).unwrap())
},
Ok(byte) => {
let mut res:Vec<RlpEncodable> = Vec::new();
let length = RlpDecodable::decode_next_length(reader, byte, LIST_OFFSET);
let initial_pos = reader.tell().unwrap() as uint;
while (reader.tell().unwrap() as uint) < initial_pos + length {
res.push(RlpDecodable::decode_with_bufreader(reader));
}
List(res)
}
Err(_) => {
panic!()
},
}
}
fn decode_next_length(reader: &mut BufReader, byte:u8, offset: u8) -> uint {
if byte <= (offset + LENGTH_RANGE) {
(byte - offset) as uint
} else {
let length_of_length = (byte - offset - LENGTH_RANGE) as uint;
reader.read_be_uint_n(length_of_length).unwrap() as uint
}
}
}
#[cfg(test)]
mod tests {
use std::vec;
use super::{RlpEncodable, RlpDecodable};
use super::RlpEncodable::{Binary, List};
macro_rules! s(($s:expr) => (Binary(String::from_str($s).into_bytes())))
macro_rules! l(($($e:expr),*) => (List(vec!($($e),*))))
fn generate_pairs() -> vec::MoveItems<(RlpEncodable, RlpDecodable)> {
let lorem = "Lorem ipsum dolor sit amet, consectetur adipisicing elit";
(vec![
(s!(""), RlpDecodable(vec![0x80])),
(s!("\x0f"), RlpDecodable(vec![0x0f])),
(s!("\x04\x00"), RlpDecodable(vec![0x82, 0x04, 0x00])),
(s!("dog"), RlpDecodable(vec![0x83, 0x64, 0x6f, 0x67])),
(l![], RlpDecodable(vec![0xc0])),
(l![s!("cat"), s!("dog")], RlpDecodable(vec![0xc8, 0x83, 0x63, 0x61, 0x74, 0x83, 0x64, 0x6f, 0x67])),
(s!(lorem), RlpDecodable(vec![0xb8, 0x38] + String::from_str(lorem).into_bytes())),
(
l![l![], l![l![]], l![l![], l![l![]]]],
RlpDecodable(vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0])
),
]).into_iter()
}
#[test]
fn rlp_encodage() {
for (a, b) in generate_pairs() {
assert!(a.encode() == b);
}
}
#[test]
fn rlp_decodage() {
for (b, a) in generate_pairs() {
assert!(a.decode() == b);
}
}
} | if length < 32u.pow(length_of_length + 1) {
let mut data = vec![length_of_length as u8 + offset + LENGTH_RANGE]; | random_line_split |
rlp.rs | // The purpose of RLP is to {en|de}code arbitrarily nested arrays of binary data
// specs: https://github.com/ethereum/wiki/wiki/RLP
use std::num::Int;
use std::io::BufReader;
use std::io::extensions::u64_to_be_bytes;
use self::RlpEncodable::{Binary, List};
#[deriving(PartialEq, Eq, Show)]
pub enum RlpEncodable {
Binary(Vec<u8>),
List(Vec<RlpEncodable>)
}
#[deriving(PartialEq, Eq, Show)]
pub struct RlpDecodable(Vec<u8>);
pub trait Rlpable {
fn encode(self) -> RlpEncodable;
fn decode(from: RlpDecodable) -> Self;
}
static BINARY_OFFSET: u8 = 128;
static LIST_OFFSET: u8 = 192;
static LENGTH_RANGE: u8 = 55;
impl RlpEncodable {
pub fn encode(self) -> RlpDecodable {
RlpDecodable(
match self {
Binary(v) => {
if v.len() == 1 && v[0] < BINARY_OFFSET { v }
else { RlpEncodable::encode_next_length(v.len(), BINARY_OFFSET) + v }
},
List(v) => {
let mut data:Vec<u8> = Vec::new();
for item in v.into_iter() {
data.push_all(item.encode().to_vec().as_slice());
}
RlpEncodable::encode_next_length(data.len(), LIST_OFFSET) + data
}
}
)
}
fn encode_next_length(length: uint, offset: u8) -> Vec<u8> {
if length <= LENGTH_RANGE as uint {
return vec![length as u8 + offset];
}
for length_of_length in range(0u, 8) {
if length < 32u.pow(length_of_length + 1) {
let mut data = vec![length_of_length as u8 + offset + LENGTH_RANGE];
u64_to_be_bytes(length as u64, length_of_length, |v| data.push_all(v));
return data;
}
}
panic!()
}
}
impl RlpDecodable {
pub fn new(vec: Vec<u8>) -> RlpDecodable {
RlpDecodable(vec)
}
pub fn to_vec(self) -> Vec<u8> {
let RlpDecodable(vec) = self;
vec
}
pub fn decode(self) -> RlpEncodable {
let vec = self.to_vec();
let mut reader = BufReader::new(vec.as_slice());
RlpDecodable::decode_with_bufreader(&mut reader)
}
fn decode_with_bufreader(reader: &mut BufReader) -> RlpEncodable {
match reader.read_byte() {
Ok(byte) if byte < BINARY_OFFSET => {
Binary(vec![byte])
},
Ok(byte) if byte < LIST_OFFSET => {
let length = RlpDecodable::decode_next_length(reader, byte, BINARY_OFFSET);
Binary(reader.read_exact(length).unwrap())
},
Ok(byte) => {
let mut res:Vec<RlpEncodable> = Vec::new();
let length = RlpDecodable::decode_next_length(reader, byte, LIST_OFFSET);
let initial_pos = reader.tell().unwrap() as uint;
while (reader.tell().unwrap() as uint) < initial_pos + length {
res.push(RlpDecodable::decode_with_bufreader(reader));
}
List(res)
}
Err(_) => {
panic!()
},
}
}
fn | (reader: &mut BufReader, byte:u8, offset: u8) -> uint {
if byte <= (offset + LENGTH_RANGE) {
(byte - offset) as uint
} else {
let length_of_length = (byte - offset - LENGTH_RANGE) as uint;
reader.read_be_uint_n(length_of_length).unwrap() as uint
}
}
}
#[cfg(test)]
mod tests {
use std::vec;
use super::{RlpEncodable, RlpDecodable};
use super::RlpEncodable::{Binary, List};
macro_rules! s(($s:expr) => (Binary(String::from_str($s).into_bytes())))
macro_rules! l(($($e:expr),*) => (List(vec!($($e),*))))
fn generate_pairs() -> vec::MoveItems<(RlpEncodable, RlpDecodable)> {
let lorem = "Lorem ipsum dolor sit amet, consectetur adipisicing elit";
(vec![
(s!(""), RlpDecodable(vec![0x80])),
(s!("\x0f"), RlpDecodable(vec![0x0f])),
(s!("\x04\x00"), RlpDecodable(vec![0x82, 0x04, 0x00])),
(s!("dog"), RlpDecodable(vec![0x83, 0x64, 0x6f, 0x67])),
(l![], RlpDecodable(vec![0xc0])),
(l![s!("cat"), s!("dog")], RlpDecodable(vec![0xc8, 0x83, 0x63, 0x61, 0x74, 0x83, 0x64, 0x6f, 0x67])),
(s!(lorem), RlpDecodable(vec![0xb8, 0x38] + String::from_str(lorem).into_bytes())),
(
l![l![], l![l![]], l![l![], l![l![]]]],
RlpDecodable(vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0])
),
]).into_iter()
}
#[test]
fn rlp_encodage() {
for (a, b) in generate_pairs() {
assert!(a.encode() == b);
}
}
#[test]
fn rlp_decodage() {
for (b, a) in generate_pairs() {
assert!(a.decode() == b);
}
}
}
| decode_next_length | identifier_name |
rlp.rs | // The purpose of RLP is to {en|de}code arbitrarily nested arrays of binary data
// specs: https://github.com/ethereum/wiki/wiki/RLP
use std::num::Int;
use std::io::BufReader;
use std::io::extensions::u64_to_be_bytes;
use self::RlpEncodable::{Binary, List};
#[deriving(PartialEq, Eq, Show)]
pub enum RlpEncodable {
Binary(Vec<u8>),
List(Vec<RlpEncodable>)
}
#[deriving(PartialEq, Eq, Show)]
pub struct RlpDecodable(Vec<u8>);
pub trait Rlpable {
fn encode(self) -> RlpEncodable;
fn decode(from: RlpDecodable) -> Self;
}
static BINARY_OFFSET: u8 = 128;
static LIST_OFFSET: u8 = 192;
static LENGTH_RANGE: u8 = 55;
impl RlpEncodable {
pub fn encode(self) -> RlpDecodable {
RlpDecodable(
match self {
Binary(v) => {
if v.len() == 1 && v[0] < BINARY_OFFSET { v }
else |
},
List(v) => {
let mut data:Vec<u8> = Vec::new();
for item in v.into_iter() {
data.push_all(item.encode().to_vec().as_slice());
}
RlpEncodable::encode_next_length(data.len(), LIST_OFFSET) + data
}
}
)
}
fn encode_next_length(length: uint, offset: u8) -> Vec<u8> {
if length <= LENGTH_RANGE as uint {
return vec![length as u8 + offset];
}
for length_of_length in range(0u, 8) {
if length < 32u.pow(length_of_length + 1) {
let mut data = vec![length_of_length as u8 + offset + LENGTH_RANGE];
u64_to_be_bytes(length as u64, length_of_length, |v| data.push_all(v));
return data;
}
}
panic!()
}
}
impl RlpDecodable {
pub fn new(vec: Vec<u8>) -> RlpDecodable {
RlpDecodable(vec)
}
pub fn to_vec(self) -> Vec<u8> {
let RlpDecodable(vec) = self;
vec
}
pub fn decode(self) -> RlpEncodable {
let vec = self.to_vec();
let mut reader = BufReader::new(vec.as_slice());
RlpDecodable::decode_with_bufreader(&mut reader)
}
fn decode_with_bufreader(reader: &mut BufReader) -> RlpEncodable {
match reader.read_byte() {
Ok(byte) if byte < BINARY_OFFSET => {
Binary(vec![byte])
},
Ok(byte) if byte < LIST_OFFSET => {
let length = RlpDecodable::decode_next_length(reader, byte, BINARY_OFFSET);
Binary(reader.read_exact(length).unwrap())
},
Ok(byte) => {
let mut res:Vec<RlpEncodable> = Vec::new();
let length = RlpDecodable::decode_next_length(reader, byte, LIST_OFFSET);
let initial_pos = reader.tell().unwrap() as uint;
while (reader.tell().unwrap() as uint) < initial_pos + length {
res.push(RlpDecodable::decode_with_bufreader(reader));
}
List(res)
}
Err(_) => {
panic!()
},
}
}
fn decode_next_length(reader: &mut BufReader, byte:u8, offset: u8) -> uint {
if byte <= (offset + LENGTH_RANGE) {
(byte - offset) as uint
} else {
let length_of_length = (byte - offset - LENGTH_RANGE) as uint;
reader.read_be_uint_n(length_of_length).unwrap() as uint
}
}
}
#[cfg(test)]
mod tests {
use std::vec;
use super::{RlpEncodable, RlpDecodable};
use super::RlpEncodable::{Binary, List};
macro_rules! s(($s:expr) => (Binary(String::from_str($s).into_bytes())))
macro_rules! l(($($e:expr),*) => (List(vec!($($e),*))))
fn generate_pairs() -> vec::MoveItems<(RlpEncodable, RlpDecodable)> {
let lorem = "Lorem ipsum dolor sit amet, consectetur adipisicing elit";
(vec![
(s!(""), RlpDecodable(vec![0x80])),
(s!("\x0f"), RlpDecodable(vec![0x0f])),
(s!("\x04\x00"), RlpDecodable(vec![0x82, 0x04, 0x00])),
(s!("dog"), RlpDecodable(vec![0x83, 0x64, 0x6f, 0x67])),
(l![], RlpDecodable(vec![0xc0])),
(l![s!("cat"), s!("dog")], RlpDecodable(vec![0xc8, 0x83, 0x63, 0x61, 0x74, 0x83, 0x64, 0x6f, 0x67])),
(s!(lorem), RlpDecodable(vec![0xb8, 0x38] + String::from_str(lorem).into_bytes())),
(
l![l![], l![l![]], l![l![], l![l![]]]],
RlpDecodable(vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0])
),
]).into_iter()
}
#[test]
fn rlp_encodage() {
for (a, b) in generate_pairs() {
assert!(a.encode() == b);
}
}
#[test]
fn rlp_decodage() {
for (b, a) in generate_pairs() {
assert!(a.decode() == b);
}
}
}
| { RlpEncodable::encode_next_length(v.len(), BINARY_OFFSET) + v } | conditional_block |
tests.rs | use super::make_command_line;
use super::Arg;
use crate::env;
use crate::ffi::{OsStr, OsString};
use crate::process::Command;
#[test]
fn test_raw_args() {
let command_line = &make_command_line(
OsStr::new("quoted exe"),
&[
Arg::Regular(OsString::from("quote me")),
Arg::Raw(OsString::from("quote me *not*")),
Arg::Raw(OsString::from("\t\\")),
Arg::Raw(OsString::from("internal \\\"backslash-\"quote")),
Arg::Regular(OsString::from("optional-quotes")),
],
false,
)
.unwrap();
assert_eq!(
String::from_utf16(command_line).unwrap(),
"\"quoted exe\" \"quote me\" quote me *not* \t\\ internal \\\"backslash-\"quote optional-quotes"
);
}
#[test]
fn test_make_command_line() {
fn test_wrapper(prog: &str, args: &[&str], force_quotes: bool) -> String {
let command_line = &make_command_line(
OsStr::new(prog),
&args.iter().map(|a| Arg::Regular(OsString::from(a))).collect::<Vec<_>>(),
force_quotes,
)
.unwrap();
String::from_utf16(command_line).unwrap()
}
assert_eq!(test_wrapper("prog", &["aaa", "bbb", "ccc"], false), "\"prog\" aaa bbb ccc");
assert_eq!(test_wrapper("prog", &[r"C:\"], false), r#""prog" C:\"#);
assert_eq!(test_wrapper("prog", &[r"2slashes\\"], false), r#""prog" 2slashes\\"#);
assert_eq!(test_wrapper("prog", &[r" C:\"], false), r#""prog" " C:\\""#);
assert_eq!(test_wrapper("prog", &[r" 2slashes\\"], false), r#""prog" " 2slashes\\\\""#);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa v*"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], true),
"\"C:\\Program Files\\blah\\blah.exe\" \"aaa\" \"v*\""
);
assert_eq!(
test_wrapper("C:\\Program Files\\test", &["aa\"bb"], false),
"\"C:\\Program Files\\test\" aa\\\"bb"
);
assert_eq!(test_wrapper("echo", &["a b c"], false), "\"echo\" \"a b c\"");
assert_eq!(
test_wrapper("echo", &["\" \\\" \\", "\\"], false),
"\"echo\" \"\\\" \\\\\\\" \\\\\" \\"
);
assert_eq!(
test_wrapper("\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}", &[], false),
"\"\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}\""
);
}
// On Windows, environment args are case preserving but comparisons are case-insensitive.
// See: #85242
#[test]
fn windows_env_unicode_case() {
let test_cases = [
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
];
// Test that `cmd.env` matches `env::set_var` when setting two strings that
// may (or may not) be case-folded when compared.
for (a, b) in test_cases.iter() {
let mut cmd = Command::new("cmd");
cmd.env(a, "1");
cmd.env(b, "2");
env::set_var(a, "1");
env::set_var(b, "2");
for (key, value) in cmd.get_envs() {
assert_eq!(
env::var(key).ok(),
value.map(|s| s.to_string_lossy().into_owned()),
"command environment mismatch: {} {}",
a,
b
);
}
}
}
// UWP applications run in a restricted environment which means this test may not work.
#[cfg(not(target_vendor = "uwp"))]
#[test]
fn windows_exe_resolver() {
use super::resolve_exe;
use crate::io;
// Test a full path, with and without the `exe` extension.
let mut current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
current_exe.set_extension("");
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
// Test lone file names.
assert!(resolve_exe(OsStr::new("cmd"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.EXE"), None).is_ok());
assert!(resolve_exe(OsStr::new("fc"), None).is_ok()); | // Invalid file names should return InvalidInput.
assert_eq!(resolve_exe(OsStr::new(""), None).unwrap_err().kind(), io::ErrorKind::InvalidInput);
assert_eq!(
resolve_exe(OsStr::new("\0"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
);
// Trailing slash, therefore there's no file name component.
assert_eq!(
resolve_exe(OsStr::new(r"C:\Path\to\"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
);
/*
Some of the following tests may need to be changed if you are deliberately
changing the behaviour of `resolve_exe`.
*/
let paths = env::var_os("PATH").unwrap();
env::set_var("PATH", "");
assert_eq!(resolve_exe(OsStr::new("rustc"), None).unwrap_err().kind(), io::ErrorKind::NotFound);
let child_paths = Some(paths.as_os_str());
assert!(resolve_exe(OsStr::new("rustc"), child_paths).is_ok());
// The resolver looks in system directories even when `PATH` is empty.
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
// The application's directory is also searched.
let current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.file_name().unwrap().as_ref(), None).is_ok());
} | random_line_split |
|
tests.rs | use super::make_command_line;
use super::Arg;
use crate::env;
use crate::ffi::{OsStr, OsString};
use crate::process::Command;
#[test]
fn test_raw_args() {
let command_line = &make_command_line(
OsStr::new("quoted exe"),
&[
Arg::Regular(OsString::from("quote me")),
Arg::Raw(OsString::from("quote me *not*")),
Arg::Raw(OsString::from("\t\\")),
Arg::Raw(OsString::from("internal \\\"backslash-\"quote")),
Arg::Regular(OsString::from("optional-quotes")),
],
false,
)
.unwrap();
assert_eq!(
String::from_utf16(command_line).unwrap(),
"\"quoted exe\" \"quote me\" quote me *not* \t\\ internal \\\"backslash-\"quote optional-quotes"
);
}
#[test]
fn test_make_command_line() {
fn test_wrapper(prog: &str, args: &[&str], force_quotes: bool) -> String {
let command_line = &make_command_line(
OsStr::new(prog),
&args.iter().map(|a| Arg::Regular(OsString::from(a))).collect::<Vec<_>>(),
force_quotes,
)
.unwrap();
String::from_utf16(command_line).unwrap()
}
assert_eq!(test_wrapper("prog", &["aaa", "bbb", "ccc"], false), "\"prog\" aaa bbb ccc");
assert_eq!(test_wrapper("prog", &[r"C:\"], false), r#""prog" C:\"#);
assert_eq!(test_wrapper("prog", &[r"2slashes\\"], false), r#""prog" 2slashes\\"#);
assert_eq!(test_wrapper("prog", &[r" C:\"], false), r#""prog" " C:\\""#);
assert_eq!(test_wrapper("prog", &[r" 2slashes\\"], false), r#""prog" " 2slashes\\\\""#);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa v*"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], true),
"\"C:\\Program Files\\blah\\blah.exe\" \"aaa\" \"v*\""
);
assert_eq!(
test_wrapper("C:\\Program Files\\test", &["aa\"bb"], false),
"\"C:\\Program Files\\test\" aa\\\"bb"
);
assert_eq!(test_wrapper("echo", &["a b c"], false), "\"echo\" \"a b c\"");
assert_eq!(
test_wrapper("echo", &["\" \\\" \\", "\\"], false),
"\"echo\" \"\\\" \\\\\\\" \\\\\" \\"
);
assert_eq!(
test_wrapper("\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}", &[], false),
"\"\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}\""
);
}
// On Windows, environment args are case preserving but comparisons are case-insensitive.
// See: #85242
#[test]
fn windows_env_unicode_case() {
let test_cases = [
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
];
// Test that `cmd.env` matches `env::set_var` when setting two strings that
// may (or may not) be case-folded when compared.
for (a, b) in test_cases.iter() {
let mut cmd = Command::new("cmd");
cmd.env(a, "1");
cmd.env(b, "2");
env::set_var(a, "1");
env::set_var(b, "2");
for (key, value) in cmd.get_envs() {
assert_eq!(
env::var(key).ok(),
value.map(|s| s.to_string_lossy().into_owned()),
"command environment mismatch: {} {}",
a,
b
);
}
}
}
// UWP applications run in a restricted environment which means this test may not work.
#[cfg(not(target_vendor = "uwp"))]
#[test]
fn windows_exe_resolver() {
use super::resolve_exe;
use | // Trailing slash, therefore there's no file name component.
assert_eq!(
resolve_exe(OsStr::new(r"C:\Path\to\"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
);
/*
Some of the following tests may need to be changed if you are deliberately
changing the behaviour of `resolve_exe`.
*/
let paths = env::var_os("PATH").unwrap();
env::set_var("PATH", "");
assert_eq!(resolve_exe(OsStr::new("rustc"), None).unwrap_err().kind(), io::ErrorKind::NotFound);
let child_paths = Some(paths.as_os_str());
assert!(resolve_exe(OsStr::new("rustc"), child_paths).is_ok());
// The resolver looks in system directories even when `PATH` is empty.
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
// The application's directory is also searched.
let current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.file_name().unwrap().as_ref(), None).is_ok());
}
| crate::io;
// Test a full path, with and without the `exe` extension.
let mut current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
current_exe.set_extension("");
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
// Test lone file names.
assert!(resolve_exe(OsStr::new("cmd"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.EXE"), None).is_ok());
assert!(resolve_exe(OsStr::new("fc"), None).is_ok());
// Invalid file names should return InvalidInput.
assert_eq!(resolve_exe(OsStr::new(""), None).unwrap_err().kind(), io::ErrorKind::InvalidInput);
assert_eq!(
resolve_exe(OsStr::new("\0"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
); | identifier_body |
tests.rs | use super::make_command_line;
use super::Arg;
use crate::env;
use crate::ffi::{OsStr, OsString};
use crate::process::Command;
#[test]
fn test_raw_args() {
let command_line = &make_command_line(
OsStr::new("quoted exe"),
&[
Arg::Regular(OsString::from("quote me")),
Arg::Raw(OsString::from("quote me *not*")),
Arg::Raw(OsString::from("\t\\")),
Arg::Raw(OsString::from("internal \\\"backslash-\"quote")),
Arg::Regular(OsString::from("optional-quotes")),
],
false,
)
.unwrap();
assert_eq!(
String::from_utf16(command_line).unwrap(),
"\"quoted exe\" \"quote me\" quote me *not* \t\\ internal \\\"backslash-\"quote optional-quotes"
);
}
#[test]
fn test_make_command_line() {
fn test_wrapper(prog: &str, args: &[&str], force_quotes: bool) -> String {
let command_line = &make_command_line(
OsStr::new(prog),
&args.iter().map(|a| Arg::Regular(OsString::from(a))).collect::<Vec<_>>(),
force_quotes,
)
.unwrap();
String::from_utf16(command_line).unwrap()
}
assert_eq!(test_wrapper("prog", &["aaa", "bbb", "ccc"], false), "\"prog\" aaa bbb ccc");
assert_eq!(test_wrapper("prog", &[r"C:\"], false), r#""prog" C:\"#);
assert_eq!(test_wrapper("prog", &[r"2slashes\\"], false), r#""prog" 2slashes\\"#);
assert_eq!(test_wrapper("prog", &[r" C:\"], false), r#""prog" " C:\\""#);
assert_eq!(test_wrapper("prog", &[r" 2slashes\\"], false), r#""prog" " 2slashes\\\\""#);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], false),
"\"C:\\Program Files\\blah\\blah.exe\" aaa v*"
);
assert_eq!(
test_wrapper("C:\\Program Files\\blah\\blah.exe", &["aaa", "v*"], true),
"\"C:\\Program Files\\blah\\blah.exe\" \"aaa\" \"v*\""
);
assert_eq!(
test_wrapper("C:\\Program Files\\test", &["aa\"bb"], false),
"\"C:\\Program Files\\test\" aa\\\"bb"
);
assert_eq!(test_wrapper("echo", &["a b c"], false), "\"echo\" \"a b c\"");
assert_eq!(
test_wrapper("echo", &["\" \\\" \\", "\\"], false),
"\"echo\" \"\\\" \\\\\\\" \\\\\" \\"
);
assert_eq!(
test_wrapper("\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}", &[], false),
"\"\u{03c0}\u{042f}\u{97f3}\u{00e6}\u{221e}\""
);
}
// On Windows, environment args are case preserving but comparisons are case-insensitive.
// See: #85242
#[test]
fn | () {
let test_cases = [
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
("ä", "Ä"),
("ß", "SS"),
("Ä", "Ö"),
("Ä", "Ö"),
("I", "İ"),
("I", "i"),
("I", "ı"),
("i", "I"),
("i", "İ"),
("i", "ı"),
("İ", "I"),
("İ", "i"),
("İ", "ı"),
("ı", "I"),
("ı", "i"),
("ı", "İ"),
];
// Test that `cmd.env` matches `env::set_var` when setting two strings that
// may (or may not) be case-folded when compared.
for (a, b) in test_cases.iter() {
let mut cmd = Command::new("cmd");
cmd.env(a, "1");
cmd.env(b, "2");
env::set_var(a, "1");
env::set_var(b, "2");
for (key, value) in cmd.get_envs() {
assert_eq!(
env::var(key).ok(),
value.map(|s| s.to_string_lossy().into_owned()),
"command environment mismatch: {} {}",
a,
b
);
}
}
}
// UWP applications run in a restricted environment which means this test may not work.
#[cfg(not(target_vendor = "uwp"))]
#[test]
fn windows_exe_resolver() {
use super::resolve_exe;
use crate::io;
// Test a full path, with and without the `exe` extension.
let mut current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
current_exe.set_extension("");
assert!(resolve_exe(current_exe.as_ref(), None).is_ok());
// Test lone file names.
assert!(resolve_exe(OsStr::new("cmd"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
assert!(resolve_exe(OsStr::new("cmd.EXE"), None).is_ok());
assert!(resolve_exe(OsStr::new("fc"), None).is_ok());
// Invalid file names should return InvalidInput.
assert_eq!(resolve_exe(OsStr::new(""), None).unwrap_err().kind(), io::ErrorKind::InvalidInput);
assert_eq!(
resolve_exe(OsStr::new("\0"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
);
// Trailing slash, therefore there's no file name component.
assert_eq!(
resolve_exe(OsStr::new(r"C:\Path\to\"), None).unwrap_err().kind(),
io::ErrorKind::InvalidInput
);
/*
Some of the following tests may need to be changed if you are deliberately
changing the behaviour of `resolve_exe`.
*/
let paths = env::var_os("PATH").unwrap();
env::set_var("PATH", "");
assert_eq!(resolve_exe(OsStr::new("rustc"), None).unwrap_err().kind(), io::ErrorKind::NotFound);
let child_paths = Some(paths.as_os_str());
assert!(resolve_exe(OsStr::new("rustc"), child_paths).is_ok());
// The resolver looks in system directories even when `PATH` is empty.
assert!(resolve_exe(OsStr::new("cmd.exe"), None).is_ok());
// The application's directory is also searched.
let current_exe = env::current_exe().unwrap();
assert!(resolve_exe(current_exe.file_name().unwrap().as_ref(), None).is_ok());
}
| windows_env_unicode_case | identifier_name |
include_socket_addr.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast> SendDescUnicast for IncludeSocketAddr<SD> {}
impl<SD: SendDescMulticast> SendDescMulticast for IncludeSocketAddr<SD> {}
/// Combinator for Send Descriptors created by [`SendDescExt::include_socket_addr`].
#[derive(Debug)]
pub struct IncludeSocketAddr<SD> {
pub(super) inner: SD,
}
impl<SD> IncludeSocketAddr<SD> {
pub(super) fn new(inner: SD) -> IncludeSocketAddr<SD> {
IncludeSocketAddr { inner }
}
}
impl<SD, IC, R> SendDesc<IC, (R, IC::SocketAddr)> for IncludeSocketAddr<SD>
where
SD: SendDesc<IC, R> + Send,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<(R, IC::SocketAddr)>, Error> |
}
| {
let socket_addr = context.ok().map(|x| x.remote_socket_addr());
self.inner.handler(context).map(|x| match (x, socket_addr) {
(ResponseStatus::Done(x), Some(socket_addr)) => ResponseStatus::Done((x, socket_addr)),
(ResponseStatus::Done(_), None) => unreachable!(),
(ResponseStatus::SendNext, _) => ResponseStatus::SendNext,
(ResponseStatus::Continue, _) => ResponseStatus::Continue,
})
} | identifier_body |
include_socket_addr.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast> SendDescUnicast for IncludeSocketAddr<SD> {}
impl<SD: SendDescMulticast> SendDescMulticast for IncludeSocketAddr<SD> {}
/// Combinator for Send Descriptors created by [`SendDescExt::include_socket_addr`].
#[derive(Debug)]
pub struct IncludeSocketAddr<SD> {
pub(super) inner: SD,
}
impl<SD> IncludeSocketAddr<SD> {
pub(super) fn new(inner: SD) -> IncludeSocketAddr<SD> {
IncludeSocketAddr { inner }
}
}
impl<SD, IC, R> SendDesc<IC, (R, IC::SocketAddr)> for IncludeSocketAddr<SD>
where
SD: SendDesc<IC, R> + Send,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn | (
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<(R, IC::SocketAddr)>, Error> {
let socket_addr = context.ok().map(|x| x.remote_socket_addr());
self.inner.handler(context).map(|x| match (x, socket_addr) {
(ResponseStatus::Done(x), Some(socket_addr)) => ResponseStatus::Done((x, socket_addr)),
(ResponseStatus::Done(_), None) => unreachable!(),
(ResponseStatus::SendNext, _) => ResponseStatus::SendNext,
(ResponseStatus::Continue, _) => ResponseStatus::Continue,
})
}
}
| handler | identifier_name |
include_socket_addr.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0 | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::*;
impl<SD: SendDescUnicast> SendDescUnicast for IncludeSocketAddr<SD> {}
impl<SD: SendDescMulticast> SendDescMulticast for IncludeSocketAddr<SD> {}
/// Combinator for Send Descriptors created by [`SendDescExt::include_socket_addr`].
#[derive(Debug)]
pub struct IncludeSocketAddr<SD> {
pub(super) inner: SD,
}
impl<SD> IncludeSocketAddr<SD> {
pub(super) fn new(inner: SD) -> IncludeSocketAddr<SD> {
IncludeSocketAddr { inner }
}
}
impl<SD, IC, R> SendDesc<IC, (R, IC::SocketAddr)> for IncludeSocketAddr<SD>
where
SD: SendDesc<IC, R> + Send,
IC: InboundContext,
R: Send,
{
send_desc_passthru_timing!(inner);
send_desc_passthru_options!(inner);
send_desc_passthru_payload!(inner);
send_desc_passthru_supports_option!(inner);
fn handler(
&mut self,
context: Result<&IC, Error>,
) -> Result<ResponseStatus<(R, IC::SocketAddr)>, Error> {
let socket_addr = context.ok().map(|x| x.remote_socket_addr());
self.inner.handler(context).map(|x| match (x, socket_addr) {
(ResponseStatus::Done(x), Some(socket_addr)) => ResponseStatus::Done((x, socket_addr)),
(ResponseStatus::Done(_), None) => unreachable!(),
(ResponseStatus::SendNext, _) => ResponseStatus::SendNext,
(ResponseStatus::Continue, _) => ResponseStatus::Continue,
})
}
} | // | random_line_split |
input.rs | //! This is the implementation of the qlc GUI.
//! There are several distinct
use std::mem;
use glutin::{ElementState, MouseButton};
type Identifier = String;
#[derive(Debug, Serialize, Deserialize)]
pub struct ScreenSlice {
pub x_min: f32,
pub x_max: f32,
pub y_min: f32,
pub y_max: f32
}
impl ScreenSlice {
fn intersect(&self, x: f32, y: f32) -> bool {
x >= self.x_min && x <= self.x_max && y >= self.y_min && y <= self.y_max
}
pub fn as_triangles(&self, depth: f32) -> [[f32; 4]; 6] {
[
[self.x_min, self.y_min, depth, 1.0],
[self.x_min, self.y_max, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
[self.x_min, self.y_min, depth, 1.0],
[self.x_max, self.y_min, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
]
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Area {
pub id: Identifier,
pub slice: ScreenSlice
}
#[derive(Debug)]
enum Message {
MouseEnter { x: f32, y: f32 },
MouseLeave,
MouseMove { x: f32, y: f32 },
MouseInput { state: ElementState, button: MouseButton },
MouseWheel, | Move,
Enter
}
pub struct Screen {
interact: Vec<Area>,
latest_position: Option<(f32, f32)>,
latest_interact: Option<Identifier>,
}
impl Screen {
pub fn new() -> Self {
Screen {
interact: Vec::new(),
latest_position: None,
latest_interact: None,
}
}
fn send(&self, id: &str, msg: Message) {
println!("{} <- {:?}", id, msg);
}
pub fn add_interact_area(&mut self, area: Area) {
self.interact.push(area);
if let Some((x, y)) = self.latest_position {
self.position(x, y);
}
}
/// This function should be called when the position of the mouse or the interact list changes.
/// It is automatically called when an element is added to the interact list, but needs to be called on every recieved mouse event.
pub fn position(&mut self, x: f32, y: f32) {
self.latest_position = Some((x, y));
// Traverse the list from the tail, so that the most recent added elements at the top of the screen take precedence.
let mut range = 0..self.interact.len();
while let Some(index) = range.next_back() {
let area = &self.interact[index];
if area.slice.intersect(x, y) {
let action = if let Some(ref mut id) = self.latest_interact {
// If there was already a latest interact, we need to make sure it isn't still in the same area.
if &area.id!= id {
// Replace the value of the latest_interact with this area while receiving the last area into the other variable.
let mut other = area.id.clone();
mem::swap(id, &mut other);
Action::LeaveEnter { left: other }
} else {
Action::Move
}
} else {
self.latest_interact = Some(area.id.clone());
Action::Enter
};
match action {
Action::LeaveEnter {left} => {
self.send(&left, Message::MouseLeave);
self.send(&area.id, Message::MouseEnter { x: x, y: y })
},
Action::Move => self.send(&area.id, Message::MouseMove { x: x, y: y }),
Action::Enter => self.send(&area.id, Message::MouseEnter { x: x, y: y } )
};
return
}
}
//println!("MISS: {}, {}", x, y);
if let Some(id) = self.latest_interact.take() {
self.send(&id, Message::MouseLeave);
}
}
pub fn mouse_click(&self, state: ElementState, button: MouseButton) {
let target = if let Some(ref id) = self.latest_interact {
id
} else {
return
};
self.send(target, Message::MouseInput { state: state, button: button });
}
} | KeyboardInput
}
enum Action {
LeaveEnter { left: String }, | random_line_split |
input.rs | //! This is the implementation of the qlc GUI.
//! There are several distinct
use std::mem;
use glutin::{ElementState, MouseButton};
type Identifier = String;
#[derive(Debug, Serialize, Deserialize)]
pub struct ScreenSlice {
pub x_min: f32,
pub x_max: f32,
pub y_min: f32,
pub y_max: f32
}
impl ScreenSlice {
fn intersect(&self, x: f32, y: f32) -> bool {
x >= self.x_min && x <= self.x_max && y >= self.y_min && y <= self.y_max
}
pub fn as_triangles(&self, depth: f32) -> [[f32; 4]; 6] {
[
[self.x_min, self.y_min, depth, 1.0],
[self.x_min, self.y_max, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
[self.x_min, self.y_min, depth, 1.0],
[self.x_max, self.y_min, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
]
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Area {
pub id: Identifier,
pub slice: ScreenSlice
}
#[derive(Debug)]
enum Message {
MouseEnter { x: f32, y: f32 },
MouseLeave,
MouseMove { x: f32, y: f32 },
MouseInput { state: ElementState, button: MouseButton },
MouseWheel,
KeyboardInput
}
enum Action {
LeaveEnter { left: String },
Move,
Enter
}
pub struct Screen {
interact: Vec<Area>,
latest_position: Option<(f32, f32)>,
latest_interact: Option<Identifier>,
}
impl Screen {
pub fn new() -> Self {
Screen {
interact: Vec::new(),
latest_position: None,
latest_interact: None,
}
}
fn send(&self, id: &str, msg: Message) {
println!("{} <- {:?}", id, msg);
}
pub fn add_interact_area(&mut self, area: Area) {
self.interact.push(area);
if let Some((x, y)) = self.latest_position {
self.position(x, y);
}
}
/// This function should be called when the position of the mouse or the interact list changes.
/// It is automatically called when an element is added to the interact list, but needs to be called on every recieved mouse event.
pub fn position(&mut self, x: f32, y: f32) | }
} else {
self.latest_interact = Some(area.id.clone());
Action::Enter
};
match action {
Action::LeaveEnter {left} => {
self.send(&left, Message::MouseLeave);
self.send(&area.id, Message::MouseEnter { x: x, y: y })
},
Action::Move => self.send(&area.id, Message::MouseMove { x: x, y: y }),
Action::Enter => self.send(&area.id, Message::MouseEnter { x: x, y: y } )
};
return
}
}
//println!("MISS: {}, {}", x, y);
if let Some(id) = self.latest_interact.take() {
self.send(&id, Message::MouseLeave);
}
}
pub fn mouse_click(&self, state: ElementState, button: MouseButton) {
let target = if let Some(ref id) = self.latest_interact {
id
} else {
return
};
self.send(target, Message::MouseInput { state: state, button: button });
}
} | {
self.latest_position = Some((x, y));
// Traverse the list from the tail, so that the most recent added elements at the top of the screen take precedence.
let mut range = 0..self.interact.len();
while let Some(index) = range.next_back() {
let area = &self.interact[index];
if area.slice.intersect(x, y) {
let action = if let Some(ref mut id) = self.latest_interact {
// If there was already a latest interact, we need to make sure it isn't still in the same area.
if &area.id != id {
// Replace the value of the latest_interact with this area while receiving the last area into the other variable.
let mut other = area.id.clone();
mem::swap(id, &mut other);
Action::LeaveEnter { left: other }
} else {
Action::Move | identifier_body |
input.rs | //! This is the implementation of the qlc GUI.
//! There are several distinct
use std::mem;
use glutin::{ElementState, MouseButton};
type Identifier = String;
#[derive(Debug, Serialize, Deserialize)]
pub struct ScreenSlice {
pub x_min: f32,
pub x_max: f32,
pub y_min: f32,
pub y_max: f32
}
impl ScreenSlice {
fn intersect(&self, x: f32, y: f32) -> bool {
x >= self.x_min && x <= self.x_max && y >= self.y_min && y <= self.y_max
}
pub fn as_triangles(&self, depth: f32) -> [[f32; 4]; 6] {
[
[self.x_min, self.y_min, depth, 1.0],
[self.x_min, self.y_max, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
[self.x_min, self.y_min, depth, 1.0],
[self.x_max, self.y_min, depth, 1.0],
[self.x_max, self.y_max, depth, 1.0],
]
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Area {
pub id: Identifier,
pub slice: ScreenSlice
}
#[derive(Debug)]
enum | {
MouseEnter { x: f32, y: f32 },
MouseLeave,
MouseMove { x: f32, y: f32 },
MouseInput { state: ElementState, button: MouseButton },
MouseWheel,
KeyboardInput
}
enum Action {
LeaveEnter { left: String },
Move,
Enter
}
pub struct Screen {
interact: Vec<Area>,
latest_position: Option<(f32, f32)>,
latest_interact: Option<Identifier>,
}
impl Screen {
pub fn new() -> Self {
Screen {
interact: Vec::new(),
latest_position: None,
latest_interact: None,
}
}
fn send(&self, id: &str, msg: Message) {
println!("{} <- {:?}", id, msg);
}
pub fn add_interact_area(&mut self, area: Area) {
self.interact.push(area);
if let Some((x, y)) = self.latest_position {
self.position(x, y);
}
}
/// This function should be called when the position of the mouse or the interact list changes.
/// It is automatically called when an element is added to the interact list, but needs to be called on every recieved mouse event.
pub fn position(&mut self, x: f32, y: f32) {
self.latest_position = Some((x, y));
// Traverse the list from the tail, so that the most recent added elements at the top of the screen take precedence.
let mut range = 0..self.interact.len();
while let Some(index) = range.next_back() {
let area = &self.interact[index];
if area.slice.intersect(x, y) {
let action = if let Some(ref mut id) = self.latest_interact {
// If there was already a latest interact, we need to make sure it isn't still in the same area.
if &area.id!= id {
// Replace the value of the latest_interact with this area while receiving the last area into the other variable.
let mut other = area.id.clone();
mem::swap(id, &mut other);
Action::LeaveEnter { left: other }
} else {
Action::Move
}
} else {
self.latest_interact = Some(area.id.clone());
Action::Enter
};
match action {
Action::LeaveEnter {left} => {
self.send(&left, Message::MouseLeave);
self.send(&area.id, Message::MouseEnter { x: x, y: y })
},
Action::Move => self.send(&area.id, Message::MouseMove { x: x, y: y }),
Action::Enter => self.send(&area.id, Message::MouseEnter { x: x, y: y } )
};
return
}
}
//println!("MISS: {}, {}", x, y);
if let Some(id) = self.latest_interact.take() {
self.send(&id, Message::MouseLeave);
}
}
pub fn mouse_click(&self, state: ElementState, button: MouseButton) {
let target = if let Some(ref id) = self.latest_interact {
id
} else {
return
};
self.send(target, Message::MouseInput { state: state, button: button });
}
} | Message | identifier_name |
mod.rs | pub type c_char = i8;
pub type wchar_t = i32;
pub type off_t = i64;
pub type useconds_t = u32;
pub type blkcnt_t = i64;
pub type socklen_t = u32;
pub type sa_family_t = u8;
pub type pthread_t = ::uintptr_t;
s! {
pub struct sockaddr {
pub sa_len: u8,
pub sa_family: sa_family_t,
pub sa_data: [::c_char; 14],
}
pub struct sockaddr_in6 {
pub sin6_len: u8,
pub sin6_family: sa_family_t,
pub sin6_port: ::in_port_t,
pub sin6_flowinfo: u32,
pub sin6_addr: ::in6_addr,
pub sin6_scope_id: u32,
}
pub struct sockaddr_un {
pub sun_len: u8,
pub sun_family: sa_family_t,
pub sun_path: [c_char; 104]
}
| pub struct passwd {
pub pw_name: *mut ::c_char,
pub pw_passwd: *mut ::c_char,
pub pw_uid: ::uid_t,
pub pw_gid: ::gid_t,
pub pw_change: ::time_t,
pub pw_class: *mut ::c_char,
pub pw_gecos: *mut ::c_char,
pub pw_dir: *mut ::c_char,
pub pw_shell: *mut ::c_char,
pub pw_expire: ::time_t,
#[cfg(not(any(target_os = "macos",
target_os = "ios",
target_os = "netbsd")))]
pub pw_fields: ::c_int,
}
pub struct ifaddrs {
pub ifa_next: *mut ifaddrs,
pub ifa_name: *mut ::c_char,
pub ifa_flags: ::c_uint,
pub ifa_addr: *mut ::sockaddr,
pub ifa_netmask: *mut ::sockaddr,
pub ifa_dstaddr: *mut ::sockaddr,
pub ifa_data: *mut ::c_void
}
pub struct fd_set {
fds_bits: [i32; FD_SETSIZE / 32],
}
pub struct tm {
pub tm_sec: ::c_int,
pub tm_min: ::c_int,
pub tm_hour: ::c_int,
pub tm_mday: ::c_int,
pub tm_mon: ::c_int,
pub tm_year: ::c_int,
pub tm_wday: ::c_int,
pub tm_yday: ::c_int,
pub tm_isdst: ::c_int,
pub tm_gmtoff: ::c_long,
pub tm_zone: *mut ::c_char,
}
}
pub const FIOCLEX: ::c_ulong = 0x20006601;
pub const FIONBIO: ::c_ulong = 0x8004667e;
pub const SA_ONSTACK: ::c_int = 0x0001;
pub const SA_SIGINFO: ::c_int = 0x0040;
pub const SA_RESTART: ::c_int = 0x0002;
pub const SA_RESETHAND: ::c_int = 0x0004;
pub const SA_NOCLDSTOP: ::c_int = 0x0008;
pub const SA_NODEFER: ::c_int = 0x0010;
pub const SA_NOCLDWAIT: ::c_int = 0x0020;
pub const SIGCHLD: ::c_int = 20;
pub const SIGBUS: ::c_int = 10;
pub const SIG_SETMASK: ::c_int = 3;
pub const IPV6_MULTICAST_LOOP: ::c_int = 11;
pub const IPV6_V6ONLY: ::c_int = 27;
pub const ST_RDONLY: ::c_ulong = 1;
pub const NI_MAXHOST: ::socklen_t = 1025;
pub const CTL_HW: ::c_int = 6;
pub const HW_NCPU: ::c_int = 3;
f! {
pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
(*set).fds_bits[fd / 32] &=!(1 << (fd % 32));
return
}
pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
let fd = fd as usize;
return ((*set).fds_bits[fd / 32] & (1 << (fd % 32)))!= 0
}
pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
(*set).fds_bits[fd / 32] |= 1 << (fd % 32);
return
}
pub fn FD_ZERO(set: *mut fd_set) -> () {
for slot in (*set).fds_bits.iter_mut() {
*slot = 0;
}
}
pub fn WIFEXITED(status: ::c_int) -> bool {
(status & 0x7f) == 0
}
pub fn WEXITSTATUS(status: ::c_int) -> ::c_int {
status >> 8
}
pub fn WTERMSIG(status: ::c_int) -> ::c_int {
status & 0o177
}
}
extern {
pub fn setgroups(ngroups: ::c_int,
ptr: *const ::gid_t) -> ::c_int;
pub fn ioctl(fd: ::c_int, request: ::c_ulong,...) -> ::c_int;
pub fn getnameinfo(sa: *const ::sockaddr,
salen: ::socklen_t,
host: *mut ::c_char,
hostlen: ::socklen_t,
serv: *mut ::c_char,
sevlen: ::socklen_t,
flags: ::c_int) -> ::c_int;
}
cfg_if! {
if #[cfg(any(target_os = "macos", target_os = "ios"))] {
mod apple;
pub use self::apple::*;
} else if #[cfg(any(target_os = "openbsd", target_os = "netbsd",
target_os = "bitrig"))] {
mod openbsdlike;
pub use self::openbsdlike::*;
} else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] {
mod freebsdlike;
pub use self::freebsdlike::*;
} else {
//...
}
} | random_line_split |
|
borrowck-lend-flow.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
// Note: the borrowck analysis is currently flow-insensitive.
// Therefore, some of these errors are marked as spurious and could be
// corrected by a simple change to the analysis. The others are
// either genuine or would require more advanced changes. The latter
// cases are noted.
fn borrow(_v: &int) {}
fn borrow_mut(_v: &mut int) {}
fn cond() -> bool { fail!() }
fn for_func(_f: || -> bool) { fail!() }
fn produce<T>() -> T { fail!(); }
fn inc(v: &mut Box<int>) {
*v = box() (**v + 1);
}
fn pre_freeze() {
// In this instance, the freeze starts before the mut borrow.
let mut v = box 3;
let _w = &v;
borrow_mut(v); //~ ERROR cannot borrow
}
fn post_freeze() {
// In this instance, the const alias starts after the borrow.
let mut v = box 3;
borrow_mut(v);
let _w = &v;
}
fn main() {} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
borrowck-lend-flow.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Note: the borrowck analysis is currently flow-insensitive.
// Therefore, some of these errors are marked as spurious and could be
// corrected by a simple change to the analysis. The others are
// either genuine or would require more advanced changes. The latter
// cases are noted.
fn borrow(_v: &int) {}
fn borrow_mut(_v: &mut int) {}
fn cond() -> bool { fail!() }
fn for_func(_f: || -> bool) { fail!() }
fn produce<T>() -> T { fail!(); }
fn inc(v: &mut Box<int>) {
*v = box() (**v + 1);
}
fn pre_freeze() {
// In this instance, the freeze starts before the mut borrow.
let mut v = box 3;
let _w = &v;
borrow_mut(v); //~ ERROR cannot borrow
}
fn post_freeze() |
fn main() {}
| {
// In this instance, the const alias starts after the borrow.
let mut v = box 3;
borrow_mut(v);
let _w = &v;
} | identifier_body |
borrowck-lend-flow.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Note: the borrowck analysis is currently flow-insensitive.
// Therefore, some of these errors are marked as spurious and could be
// corrected by a simple change to the analysis. The others are
// either genuine or would require more advanced changes. The latter
// cases are noted.
fn borrow(_v: &int) {}
fn | (_v: &mut int) {}
fn cond() -> bool { fail!() }
fn for_func(_f: || -> bool) { fail!() }
fn produce<T>() -> T { fail!(); }
fn inc(v: &mut Box<int>) {
*v = box() (**v + 1);
}
fn pre_freeze() {
// In this instance, the freeze starts before the mut borrow.
let mut v = box 3;
let _w = &v;
borrow_mut(v); //~ ERROR cannot borrow
}
fn post_freeze() {
// In this instance, the const alias starts after the borrow.
let mut v = box 3;
borrow_mut(v);
let _w = &v;
}
fn main() {}
| borrow_mut | identifier_name |
state_db.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use state::Account;
use header::BlockNumber;
use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable};
use bloom_journal::{Bloom, BloomJournal};
use db::COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder};
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";
const STATE_CACHE_BLOCKS: usize = 12;
/// Shared canonical state cache.
struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing.
// When changing the type of the values here, be sure to update `mem_used` and
// `new`.
accounts: LruCache<Address, Option<Account>>,
/// Information on the modifications in recently committed blocks; specifically which addresses
/// changed in which block. Ordered by block number.
modifications: VecDeque<BlockChanges>,
}
/// Buffered account cache item.
struct CacheQueueItem {
/// Account address.
address: Address,
/// Acccount data or `None` if account does not exist.
account: Option<Account>,
/// Indicates that the account was modified before being
/// added to the cache.
modified: bool,
}
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
/// Block number.
number: BlockNumber,
/// Block hash.
hash: H256,
/// Parent block hash.
parent: H256,
/// A set of modified account addresses.
accounts: HashSet<Address>,
/// Block is part of the canonical chain.
is_canon: bool,
}
/// State database abstraction.
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones local cache is accumulated and applied
/// in `sync_cache`
/// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
/// Backing database.
db: Box<JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>,
/// Local dirty cache.
local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>,
cache_size: usize,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled
parent_hash: Option<H256>,
/// Hash of the committing block or `None` if not committed yet.
commit_hash: Option<H256>,
/// Number of the committing block or `None` if not committed yet.
commit_number: Option<BlockNumber>,
}
impl StateDB {
/// Create a new instance wrapping `JournalDB` and the maximum allowed size
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(db.backing());
let cache_items = cache_size / ::std::mem::size_of::<Option<Account>>();
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache {
accounts: LruCache::new(cache_items),
modifications: VecDeque::new(),
})),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
cache_size: cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
pub fn load_bloom(db: &Database) -> Bloom {
let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");
let hash_count_bytes = match hash_count_entry {
Some(bytes) => bytes,
None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET),
};
assert_eq!(hash_count_bytes.len(), 1);
let hash_count = hash_count_bytes[0];
let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
let mut key = [0u8; 8];
for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
LittleEndian::write_u64(&mut key, i as u64);
bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error")
.and_then(|val| Some(LittleEndian::read_u64(&val[..])))
.unwrap_or(0u64);
}
let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count);
bloom
}
pub fn check_account_bloom(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
bloom.check(&*address.sha3())
}
pub fn note_account_bloom(&self, address: &Address) {
trace!(target: "account_bloom", "Note account bloom: {:?}", address);
let mut bloom = self.account_bloom.lock();
bloom.set(&*address.sha3());
}
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
assert!(journal.hash_functions <= 255);
batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
let mut key = [0u8; 8];
let mut val = [0u8; 8];
for (bloom_part_index, bloom_part_value) in journal.entries {
LittleEndian::write_u64(&mut key, bloom_part_index as u64);
LittleEndian::write_u64(&mut val, bloom_part_value);
batch.put(COL_ACCOUNT_BLOOM, &key, &val);
}
Ok(())
}
/// Journal all recent operations under the given era and ID.
pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
{
let mut bloom_lock = self.account_bloom.lock();
try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
}
let records = try!(self.db.journal_under(batch, now, id));
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);
Ok(records)
}
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
self.db.mark_canonical(batch, end_era, canon_id)
}
/// Propagate local cache into the global cache and synchonize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has ben calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best);
let mut cache = self.account_cache.lock();
let mut cache = &mut *cache;
// Purge changes from re-enacted and retracted blocks.
// Filter out commiting block if any.
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h!= p)) {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
trace!("Reverting enacted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
for block in retracted {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
trace!("Retracted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
if clear {
// We don't know anything about the block; clear everything
trace!("Wiping cache");
cache.accounts.clear();
cache.modifications.clear();
}
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) {
if cache.modifications.len() == STATE_CACHE_BLOCKS {
cache.modifications.pop_back();
}
let mut modifications = HashSet::new();
trace!("committing {} cache entries", self.local_cache.len());
for account in self.local_cache.drain(..) {
if account.modified {
modifications.insert(account.address.clone());
}
if is_best {
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) {
if let Some(new) = account.account {
if account.modified {
existing.overwrite_with(new);
}
continue;
}
}
cache.accounts.insert(account.address, account.account);
}
}
// Save modified accounts. These are ordered by the block number.
let block_changes = BlockChanges {
accounts: modifications,
number: *number,
hash: hash.clone(),
is_canon: is_best,
parent: parent.clone(),
};
let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
} else {
cache.modifications.push_back(block_changes);
}
}
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
self.db.as_hashdb()
}
/// Returns an interface to mutable HashDB.
pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
self.db.as_hashdb_mut()
}
/// Clone the database.
pub fn boxed_clone(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(), | }
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
self.db.is_pruned()
}
/// Heap size used.
pub fn mem_used(&self) -> usize {
// TODO: account for LRU-cache overhead; this is a close approximation.
self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::<Option<Account>>()
}
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &JournalDB {
&*self.db
}
/// Add a local cache entry.
/// The entry will be propagated to the global cache in `sync_cache`.
/// `modified` indicates that the entry was changed since being read from disk or global cache.
/// `data` can be set to an existing (`Some`), or non-existing account (`None`).
pub fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
self.local_cache.push(CacheQueueItem {
address: addr,
account: data,
modified: modified,
})
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(a, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
/// Query how much memory is set aside for the accounts cache (in bytes).
pub fn cache_size(&self) -> usize {
self.cache_size
}
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out account modified in later blocks.
fn is_allowed(addr: &Address, parent_hash: &Option<H256>, modifications: &VecDeque<BlockChanges>) -> bool {
let mut parent = match *parent_hash {
None => {
trace!("Cache lookup skipped for {:?}: no parent hash", addr);
return false;
}
Some(ref parent) => parent,
};
if modifications.is_empty() {
return true;
}
// Ignore all accounts modified in later blocks
// Modifications contains block ordered by the number
// We search for our parent in that list first and then for
// all its parent until we hit the canonical block,
// checking against all the intermediate modifications.
let mut iter = modifications.iter();
while let Some(ref m) = iter.next() {
if &m.hash == parent {
if m.is_canon {
return true;
}
parent = &m.parent;
}
if m.accounts.contains(addr) {
trace!("Cache lookup skipped for {:?}: modified in a later block", addr);
return false;
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
return false;
}
}
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
use util::log::init_log;
#[test]
fn state_db_smoke() {
init_log();
let mut state_db_result = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random();
let address = Address::random();
let h0 = H256::random();
let h1a = H256::random();
let h1b = H256::random();
let h2a = H256::random();
let h2b = H256::random();
let h3a = H256::random();
let h3b = H256::random();
let mut batch = DBTransaction::new(state_db.journal_db().backing());
// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
// balance [ 5 5 4 3 2 2 ]
let mut s = state_db.boxed_clone_canon(&root_parent);
s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
s.journal_under(&mut batch, 0, &h0).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.journal_under(&mut batch, 1, &h1a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
s.journal_under(&mut batch, 1, &h1b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1b);
s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1a);
s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h2a);
s.journal_under(&mut batch, 3, &h3a).unwrap();
s.sync_cache(&[], &[], true);
let s = state_db.boxed_clone_canon(&h3a);
assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5));
let s = state_db.boxed_clone_canon(&h1a);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h2b);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h1b);
assert!(s.get_cached_account(&address).is_none());
// reorg to 3b
// blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
let mut s = state_db.boxed_clone_canon(&h2b);
s.journal_under(&mut batch, 3, &h3b).unwrap();
s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true);
let s = state_db.boxed_clone_canon(&h3a);
assert!(s.get_cached_account(&address).is_none());
}
} | cache_size: self.cache_size,
parent_hash: Some(parent.clone()),
commit_hash: None,
commit_number: None,
} | random_line_split |
state_db.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use state::Account;
use header::BlockNumber;
use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable};
use bloom_journal::{Bloom, BloomJournal};
use db::COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder};
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";
const STATE_CACHE_BLOCKS: usize = 12;
/// Shared canonical state cache.
struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing.
// When changing the type of the values here, be sure to update `mem_used` and
// `new`.
accounts: LruCache<Address, Option<Account>>,
/// Information on the modifications in recently committed blocks; specifically which addresses
/// changed in which block. Ordered by block number.
modifications: VecDeque<BlockChanges>,
}
/// Buffered account cache item.
struct CacheQueueItem {
/// Account address.
address: Address,
/// Acccount data or `None` if account does not exist.
account: Option<Account>,
/// Indicates that the account was modified before being
/// added to the cache.
modified: bool,
}
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
/// Block number.
number: BlockNumber,
/// Block hash.
hash: H256,
/// Parent block hash.
parent: H256,
/// A set of modified account addresses.
accounts: HashSet<Address>,
/// Block is part of the canonical chain.
is_canon: bool,
}
/// State database abstraction.
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones local cache is accumulated and applied
/// in `sync_cache`
/// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
/// Backing database.
db: Box<JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>,
/// Local dirty cache.
local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>,
cache_size: usize,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled
parent_hash: Option<H256>,
/// Hash of the committing block or `None` if not committed yet.
commit_hash: Option<H256>,
/// Number of the committing block or `None` if not committed yet.
commit_number: Option<BlockNumber>,
}
impl StateDB {
/// Create a new instance wrapping `JournalDB` and the maximum allowed size
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(db.backing());
let cache_items = cache_size / ::std::mem::size_of::<Option<Account>>();
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache {
accounts: LruCache::new(cache_items),
modifications: VecDeque::new(),
})),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
cache_size: cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
pub fn load_bloom(db: &Database) -> Bloom {
let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");
let hash_count_bytes = match hash_count_entry {
Some(bytes) => bytes,
None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET),
};
assert_eq!(hash_count_bytes.len(), 1);
let hash_count = hash_count_bytes[0];
let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
let mut key = [0u8; 8];
for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
LittleEndian::write_u64(&mut key, i as u64);
bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error")
.and_then(|val| Some(LittleEndian::read_u64(&val[..])))
.unwrap_or(0u64);
}
let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count);
bloom
}
pub fn check_account_bloom(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
bloom.check(&*address.sha3())
}
pub fn note_account_bloom(&self, address: &Address) {
trace!(target: "account_bloom", "Note account bloom: {:?}", address);
let mut bloom = self.account_bloom.lock();
bloom.set(&*address.sha3());
}
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
assert!(journal.hash_functions <= 255);
batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
let mut key = [0u8; 8];
let mut val = [0u8; 8];
for (bloom_part_index, bloom_part_value) in journal.entries {
LittleEndian::write_u64(&mut key, bloom_part_index as u64);
LittleEndian::write_u64(&mut val, bloom_part_value);
batch.put(COL_ACCOUNT_BLOOM, &key, &val);
}
Ok(())
}
/// Journal all recent operations under the given era and ID.
pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
{
let mut bloom_lock = self.account_bloom.lock();
try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
}
let records = try!(self.db.journal_under(batch, now, id));
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);
Ok(records)
}
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
self.db.mark_canonical(batch, end_era, canon_id)
}
/// Propagate local cache into the global cache and synchonize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has ben calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best);
let mut cache = self.account_cache.lock();
let mut cache = &mut *cache;
// Purge changes from re-enacted and retracted blocks.
// Filter out commiting block if any.
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h!= p)) {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
trace!("Reverting enacted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
for block in retracted {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
trace!("Retracted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
if clear {
// We don't know anything about the block; clear everything
trace!("Wiping cache");
cache.accounts.clear();
cache.modifications.clear();
}
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) {
if cache.modifications.len() == STATE_CACHE_BLOCKS {
cache.modifications.pop_back();
}
let mut modifications = HashSet::new();
trace!("committing {} cache entries", self.local_cache.len());
for account in self.local_cache.drain(..) {
if account.modified {
modifications.insert(account.address.clone());
}
if is_best {
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) {
if let Some(new) = account.account {
if account.modified {
existing.overwrite_with(new);
}
continue;
}
}
cache.accounts.insert(account.address, account.account);
}
}
// Save modified accounts. These are ordered by the block number.
let block_changes = BlockChanges {
accounts: modifications,
number: *number,
hash: hash.clone(),
is_canon: is_best,
parent: parent.clone(),
};
let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
} else {
cache.modifications.push_back(block_changes);
}
}
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
self.db.as_hashdb()
}
/// Returns an interface to mutable HashDB.
pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
self.db.as_hashdb_mut()
}
/// Clone the database.
pub fn boxed_clone(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: Some(parent.clone()),
commit_hash: None,
commit_number: None,
}
}
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
self.db.is_pruned()
}
/// Heap size used.
pub fn mem_used(&self) -> usize {
// TODO: account for LRU-cache overhead; this is a close approximation.
self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::<Option<Account>>()
}
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &JournalDB {
&*self.db
}
/// Add a local cache entry.
/// The entry will be propagated to the global cache in `sync_cache`.
/// `modified` indicates that the entry was changed since being read from disk or global cache.
/// `data` can be set to an existing (`Some`), or non-existing account (`None`).
pub fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
self.local_cache.push(CacheQueueItem {
address: addr,
account: data,
modified: modified,
})
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(a, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
/// Query how much memory is set aside for the accounts cache (in bytes).
pub fn cache_size(&self) -> usize {
self.cache_size
}
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out account modified in later blocks.
fn is_allowed(addr: &Address, parent_hash: &Option<H256>, modifications: &VecDeque<BlockChanges>) -> bool | return true;
}
parent = &m.parent;
}
if m.accounts.contains(addr) {
trace!("Cache lookup skipped for {:?}: modified in a later block", addr);
return false;
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
return false;
}
}
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
use util::log::init_log;
#[test]
fn state_db_smoke() {
init_log();
let mut state_db_result = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random();
let address = Address::random();
let h0 = H256::random();
let h1a = H256::random();
let h1b = H256::random();
let h2a = H256::random();
let h2b = H256::random();
let h3a = H256::random();
let h3b = H256::random();
let mut batch = DBTransaction::new(state_db.journal_db().backing());
// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
// balance [ 5 5 4 3 2 2 ]
let mut s = state_db.boxed_clone_canon(&root_parent);
s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
s.journal_under(&mut batch, 0, &h0).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.journal_under(&mut batch, 1, &h1a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
s.journal_under(&mut batch, 1, &h1b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1b);
s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1a);
s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h2a);
s.journal_under(&mut batch, 3, &h3a).unwrap();
s.sync_cache(&[], &[], true);
let s = state_db.boxed_clone_canon(&h3a);
assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5));
let s = state_db.boxed_clone_canon(&h1a);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h2b);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h1b);
assert!(s.get_cached_account(&address).is_none());
// reorg to 3b
// blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
let mut s = state_db.boxed_clone_canon(&h2b);
s.journal_under(&mut batch, 3, &h3b).unwrap();
s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true);
let s = state_db.boxed_clone_canon(&h3a);
assert!(s.get_cached_account(&address).is_none());
}
}
| {
let mut parent = match *parent_hash {
None => {
trace!("Cache lookup skipped for {:?}: no parent hash", addr);
return false;
}
Some(ref parent) => parent,
};
if modifications.is_empty() {
return true;
}
// Ignore all accounts modified in later blocks
// Modifications contains block ordered by the number
// We search for our parent in that list first and then for
// all its parent until we hit the canonical block,
// checking against all the intermediate modifications.
let mut iter = modifications.iter();
while let Some(ref m) = iter.next() {
if &m.hash == parent {
if m.is_canon { | identifier_body |
state_db.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use state::Account;
use header::BlockNumber;
use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable};
use bloom_journal::{Bloom, BloomJournal};
use db::COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder};
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";
const STATE_CACHE_BLOCKS: usize = 12;
/// Shared canonical state cache.
struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing.
// When changing the type of the values here, be sure to update `mem_used` and
// `new`.
accounts: LruCache<Address, Option<Account>>,
/// Information on the modifications in recently committed blocks; specifically which addresses
/// changed in which block. Ordered by block number.
modifications: VecDeque<BlockChanges>,
}
/// Buffered account cache item.
struct CacheQueueItem {
/// Account address.
address: Address,
/// Acccount data or `None` if account does not exist.
account: Option<Account>,
/// Indicates that the account was modified before being
/// added to the cache.
modified: bool,
}
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
/// Block number.
number: BlockNumber,
/// Block hash.
hash: H256,
/// Parent block hash.
parent: H256,
/// A set of modified account addresses.
accounts: HashSet<Address>,
/// Block is part of the canonical chain.
is_canon: bool,
}
/// State database abstraction.
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones local cache is accumulated and applied
/// in `sync_cache`
/// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
/// Backing database.
db: Box<JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>,
/// Local dirty cache.
local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>,
cache_size: usize,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled
parent_hash: Option<H256>,
/// Hash of the committing block or `None` if not committed yet.
commit_hash: Option<H256>,
/// Number of the committing block or `None` if not committed yet.
commit_number: Option<BlockNumber>,
}
impl StateDB {
/// Create a new instance wrapping `JournalDB` and the maximum allowed size
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(db.backing());
let cache_items = cache_size / ::std::mem::size_of::<Option<Account>>();
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache {
accounts: LruCache::new(cache_items),
modifications: VecDeque::new(),
})),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
cache_size: cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
pub fn load_bloom(db: &Database) -> Bloom {
let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");
let hash_count_bytes = match hash_count_entry {
Some(bytes) => bytes,
None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET),
};
assert_eq!(hash_count_bytes.len(), 1);
let hash_count = hash_count_bytes[0];
let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
let mut key = [0u8; 8];
for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
LittleEndian::write_u64(&mut key, i as u64);
bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error")
.and_then(|val| Some(LittleEndian::read_u64(&val[..])))
.unwrap_or(0u64);
}
let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count);
bloom
}
pub fn check_account_bloom(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
bloom.check(&*address.sha3())
}
pub fn note_account_bloom(&self, address: &Address) {
trace!(target: "account_bloom", "Note account bloom: {:?}", address);
let mut bloom = self.account_bloom.lock();
bloom.set(&*address.sha3());
}
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
assert!(journal.hash_functions <= 255);
batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
let mut key = [0u8; 8];
let mut val = [0u8; 8];
for (bloom_part_index, bloom_part_value) in journal.entries {
LittleEndian::write_u64(&mut key, bloom_part_index as u64);
LittleEndian::write_u64(&mut val, bloom_part_value);
batch.put(COL_ACCOUNT_BLOOM, &key, &val);
}
Ok(())
}
/// Journal all recent operations under the given era and ID.
pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
{
let mut bloom_lock = self.account_bloom.lock();
try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
}
let records = try!(self.db.journal_under(batch, now, id));
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);
Ok(records)
}
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
self.db.mark_canonical(batch, end_era, canon_id)
}
/// Propagate local cache into the global cache and synchonize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has ben calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best);
let mut cache = self.account_cache.lock();
let mut cache = &mut *cache;
// Purge changes from re-enacted and retracted blocks.
// Filter out commiting block if any.
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h!= p)) {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
trace!("Reverting enacted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
for block in retracted {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
trace!("Retracted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
if clear {
// We don't know anything about the block; clear everything
trace!("Wiping cache");
cache.accounts.clear();
cache.modifications.clear();
}
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) {
if cache.modifications.len() == STATE_CACHE_BLOCKS {
cache.modifications.pop_back();
}
let mut modifications = HashSet::new();
trace!("committing {} cache entries", self.local_cache.len());
for account in self.local_cache.drain(..) {
if account.modified {
modifications.insert(account.address.clone());
}
if is_best {
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) {
if let Some(new) = account.account {
if account.modified {
existing.overwrite_with(new);
}
continue;
}
}
cache.accounts.insert(account.address, account.account);
}
}
// Save modified accounts. These are ordered by the block number.
let block_changes = BlockChanges {
accounts: modifications,
number: *number,
hash: hash.clone(),
is_canon: is_best,
parent: parent.clone(),
};
let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
} else {
cache.modifications.push_back(block_changes);
}
}
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
self.db.as_hashdb()
}
/// Returns an interface to mutable HashDB.
pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
self.db.as_hashdb_mut()
}
/// Clone the database.
pub fn boxed_clone(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
parent_hash: Some(parent.clone()),
commit_hash: None,
commit_number: None,
}
}
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
self.db.is_pruned()
}
/// Heap size used.
pub fn mem_used(&self) -> usize {
// TODO: account for LRU-cache overhead; this is a close approximation.
self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::<Option<Account>>()
}
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &JournalDB {
&*self.db
}
/// Add a local cache entry.
/// The entry will be propagated to the global cache in `sync_cache`.
/// `modified` indicates that the entry was changed since being read from disk or global cache.
/// `data` can be set to an existing (`Some`), or non-existing account (`None`).
pub fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
self.local_cache.push(CacheQueueItem {
address: addr,
account: data,
modified: modified,
})
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
let mut cache = self.account_cache.lock();
if!Self::is_allowed(a, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
/// Query how much memory is set aside for the accounts cache (in bytes).
pub fn cache_size(&self) -> usize {
self.cache_size
}
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out account modified in later blocks.
fn is_allowed(addr: &Address, parent_hash: &Option<H256>, modifications: &VecDeque<BlockChanges>) -> bool {
let mut parent = match *parent_hash {
None => {
trace!("Cache lookup skipped for {:?}: no parent hash", addr);
return false;
}
Some(ref parent) => parent,
};
if modifications.is_empty() |
// Ignore all accounts modified in later blocks
// Modifications contains block ordered by the number
// We search for our parent in that list first and then for
// all its parent until we hit the canonical block,
// checking against all the intermediate modifications.
let mut iter = modifications.iter();
while let Some(ref m) = iter.next() {
if &m.hash == parent {
if m.is_canon {
return true;
}
parent = &m.parent;
}
if m.accounts.contains(addr) {
trace!("Cache lookup skipped for {:?}: modified in a later block", addr);
return false;
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
return false;
}
}
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
use util::log::init_log;
#[test]
fn state_db_smoke() {
init_log();
let mut state_db_result = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random();
let address = Address::random();
let h0 = H256::random();
let h1a = H256::random();
let h1b = H256::random();
let h2a = H256::random();
let h2b = H256::random();
let h3a = H256::random();
let h3b = H256::random();
let mut batch = DBTransaction::new(state_db.journal_db().backing());
// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
// balance [ 5 5 4 3 2 2 ]
let mut s = state_db.boxed_clone_canon(&root_parent);
s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
s.journal_under(&mut batch, 0, &h0).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.journal_under(&mut batch, 1, &h1a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
s.journal_under(&mut batch, 1, &h1b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1b);
s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1a);
s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h2a);
s.journal_under(&mut batch, 3, &h3a).unwrap();
s.sync_cache(&[], &[], true);
let s = state_db.boxed_clone_canon(&h3a);
assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5));
let s = state_db.boxed_clone_canon(&h1a);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h2b);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h1b);
assert!(s.get_cached_account(&address).is_none());
// reorg to 3b
// blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
let mut s = state_db.boxed_clone_canon(&h2b);
s.journal_under(&mut batch, 3, &h3b).unwrap();
s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true);
let s = state_db.boxed_clone_canon(&h3a);
assert!(s.get_cached_account(&address).is_none());
}
}
| {
return true;
} | conditional_block |
state_db.rs | // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use state::Account;
use header::BlockNumber;
use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable};
use bloom_journal::{Bloom, BloomJournal};
use db::COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder};
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";
const STATE_CACHE_BLOCKS: usize = 12;
/// Shared canonical state cache.
struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing.
// When changing the type of the values here, be sure to update `mem_used` and
// `new`.
accounts: LruCache<Address, Option<Account>>,
/// Information on the modifications in recently committed blocks; specifically which addresses
/// changed in which block. Ordered by block number.
modifications: VecDeque<BlockChanges>,
}
/// Buffered account cache item.
struct CacheQueueItem {
/// Account address.
address: Address,
/// Acccount data or `None` if account does not exist.
account: Option<Account>,
/// Indicates that the account was modified before being
/// added to the cache.
modified: bool,
}
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
/// Block number.
number: BlockNumber,
/// Block hash.
hash: H256,
/// Parent block hash.
parent: H256,
/// A set of modified account addresses.
accounts: HashSet<Address>,
/// Block is part of the canonical chain.
is_canon: bool,
}
/// State database abstraction.
/// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones local cache is accumulated and applied
/// in `sync_cache`
/// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB {
/// Backing database.
db: Box<JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>,
/// Local dirty cache.
local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>,
cache_size: usize,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled
parent_hash: Option<H256>,
/// Hash of the committing block or `None` if not committed yet.
commit_hash: Option<H256>,
/// Number of the committing block or `None` if not committed yet.
commit_number: Option<BlockNumber>,
}
impl StateDB {
/// Create a new instance wrapping `JournalDB` and the maximum allowed size
/// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping.
// TODO: make the cache size actually accurate by moving the account storage cache
// into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`.
pub fn new(db: Box<JournalDB>, cache_size: usize) -> StateDB {
let bloom = Self::load_bloom(db.backing());
let cache_items = cache_size / ::std::mem::size_of::<Option<Account>>();
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache {
accounts: LruCache::new(cache_items),
modifications: VecDeque::new(),
})),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
cache_size: cache_size,
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Loads accounts bloom from the database
/// This bloom is used to handle request for the non-existant account fast
pub fn | (db: &Database) -> Bloom {
let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
.expect("Low-level database error");
let hash_count_bytes = match hash_count_entry {
Some(bytes) => bytes,
None => return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET),
};
assert_eq!(hash_count_bytes.len(), 1);
let hash_count = hash_count_bytes[0];
let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
let mut key = [0u8; 8];
for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
LittleEndian::write_u64(&mut key, i as u64);
bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error")
.and_then(|val| Some(LittleEndian::read_u64(&val[..])))
.unwrap_or(0u64);
}
let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count);
bloom
}
pub fn check_account_bloom(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
bloom.check(&*address.sha3())
}
pub fn note_account_bloom(&self, address: &Address) {
trace!(target: "account_bloom", "Note account bloom: {:?}", address);
let mut bloom = self.account_bloom.lock();
bloom.set(&*address.sha3());
}
pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
assert!(journal.hash_functions <= 255);
batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]);
let mut key = [0u8; 8];
let mut val = [0u8; 8];
for (bloom_part_index, bloom_part_value) in journal.entries {
LittleEndian::write_u64(&mut key, bloom_part_index as u64);
LittleEndian::write_u64(&mut val, bloom_part_value);
batch.put(COL_ACCOUNT_BLOOM, &key, &val);
}
Ok(())
}
/// Journal all recent operations under the given era and ID.
pub fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
{
let mut bloom_lock = self.account_bloom.lock();
try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
}
let records = try!(self.db.journal_under(batch, now, id));
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);
Ok(records)
}
/// Mark a given candidate from an ancient era as canonical, enacting its removals from the
/// backing database and reverting any non-canonical historical commit's insertions.
pub fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
self.db.mark_canonical(batch, end_era, canon_id)
}
/// Propagate local cache into the global cache and synchronize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has been calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best);
let mut cache = self.account_cache.lock();
// Re-borrow the guard so `accounts` and `modifications` can be borrowed independently below.
let mut cache = &mut *cache;
// Purge changes from re-enacted and retracted blocks.
// Filter out committing block if any.
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h!= p)) {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
trace!("Reverting enacted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
// Unknown enacted block: we cannot reason about its changes, so fall back to a full wipe.
true
}
};
}
for block in retracted {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
trace!("Retracted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
// Unknown retracted block: same conservative fallback as above.
true
}
};
}
if clear {
// We don't know anything about the block; clear everything
trace!("Wiping cache");
cache.accounts.clear();
cache.modifications.clear();
}
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) {
// Evict the oldest tracked block once the history window is full.
if cache.modifications.len() == STATE_CACHE_BLOCKS {
cache.modifications.pop_back();
}
let mut modifications = HashSet::new();
trace!("committing {} cache entries", self.local_cache.len());
for account in self.local_cache.drain(..) {
if account.modified {
modifications.insert(account.address.clone());
}
if is_best {
// If an entry already exists, merge the new data into it rather than replacing,
// but only when the local copy was actually modified.
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) {
if let Some(new) = account.account {
if account.modified {
existing.overwrite_with(new);
}
continue;
}
}
cache.accounts.insert(account.address, account.account);
}
}
// Save modified accounts. These are ordered by the block number.
let block_changes = BlockChanges {
accounts: modifications,
number: *number,
hash: hash.clone(),
is_canon: is_best,
parent: parent.clone(),
};
// Keep `modifications` sorted by descending block number: insert before the first
// entry with a smaller number, or push to the back if none exists.
let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
} else {
cache.modifications.push_back(block_changes);
}
}
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
// Thin delegation to the underlying journal DB.
self.db.as_hashdb()
}
/// Returns an interface to mutable HashDB.
pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
// Thin delegation to the underlying journal DB.
self.db.as_hashdb_mut()
}
/// Clone the database.
pub fn boxed_clone(&self) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
// The global account cache is shared between all clones.
account_cache: self.account_cache.clone(),
// Per-commit local state starts out empty for the clone.
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
// No chain position: this clone cannot serve cached canonical lookups
// until it commits (see `is_allowed`).
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
// The global account cache is shared between all clones.
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
cache_size: self.cache_size,
// Anchoring to `parent` lets cache lookups validate against the canonical chain.
parent_hash: Some(parent.clone()),
commit_hash: None,
commit_number: None,
}
}
/// Check if pruning is enabled on the database.
pub fn is_pruned(&self) -> bool {
// Delegates to the underlying journal DB.
self.db.is_pruned()
}
/// Heap size used.
pub fn mem_used(&self) -> usize {
// TODO: account for LRU-cache overhead; this is a close approximation.
// Note the cache term counts entries times `size_of::<Option<Account>>()`,
// not the accounts' actual heap footprint.
self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::<Option<Account>>()
}
/// Returns underlying `JournalDB`.
pub fn journal_db(&self) -> &JournalDB {
// Deref through the box to hand out a plain trait-object reference.
&*self.db
}
/// Add a local cache entry.
/// The entry will be propagated to the global cache in `sync_cache`.
/// `modified` indicates that the entry was changed since being read from disk or global cache.
/// `data` can be set to an existing (`Some`), or non-existing account (`None`).
pub fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
// Entries are only staged locally here; nothing reaches the shared cache
// until `sync_cache` runs after the commit.
self.local_cache.push(CacheQueueItem {
address: addr,
account: data,
modified: modified,
})
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
let mut cache = self.account_cache.lock();
// Reject the lookup unless the cached value is valid for this state's parent block.
if!Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns 'None' if cache is disabled or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
let mut cache = self.account_cache.lock();
// Same validity gate as `get_cached_account`: cached data must match our chain position.
if!Self::is_allowed(a, &self.parent_hash, &cache.modifications) {
return None;
}
// `f` runs while the cache lock is held, so it should stay cheap.
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
/// Query how much memory is set aside for the accounts cache (in bytes).
pub fn cache_size(&self) -> usize {
self.cache_size
}
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out account modified in later blocks.
fn is_allowed(addr: &Address, parent_hash: &Option<H256>, modifications: &VecDeque<BlockChanges>) -> bool {
let mut parent = match *parent_hash {
None => {
trace!("Cache lookup skipped for {:?}: no parent hash", addr);
return false;
}
Some(ref parent) => parent,
};
if modifications.is_empty() {
return true;
}
// Ignore all accounts modified in later blocks
// Modifications contains block ordered by the number
// We search for our parent in that list first and then for
// all its parent until we hit the canonical block,
// checking against all the intermediate modifications.
let mut iter = modifications.iter();
while let Some(ref m) = iter.next() {
if &m.hash == parent {
if m.is_canon {
return true;
}
parent = &m.parent;
}
if m.accounts.contains(addr) {
trace!("Cache lookup skipped for {:?}: modified in a later block", addr);
return false;
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
return false;
}
}
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
use util::log::init_log;
// Smoke-tests cache propagation/invalidation across a small chain with a fork and a reorg.
#[test]
fn state_db_smoke() {
init_log();
let mut state_db_result = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random();
let address = Address::random();
let h0 = H256::random();
let h1a = H256::random();
let h1b = H256::random();
let h2a = H256::random();
let h2b = H256::random();
let h3a = H256::random();
let h3b = H256::random();
let mut batch = DBTransaction::new(state_db.journal_db().backing());
// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
// balance [ 5 5 4 3 2 2 ]
let mut s = state_db.boxed_clone_canon(&root_parent);
s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
s.journal_under(&mut batch, 0, &h0).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.journal_under(&mut batch, 1, &h1a).unwrap();
s.sync_cache(&[], &[], true);
// 1b is a sibling of 1a committed as non-canonical (is_best = false).
let mut s = state_db.boxed_clone_canon(&h0);
s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
s.journal_under(&mut batch, 1, &h1b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1b);
s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2b).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1a);
s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
s.journal_under(&mut batch, 2, &h2a).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h2a);
s.journal_under(&mut batch, 3, &h3a).unwrap();
s.sync_cache(&[], &[], true);
// Lookup on the canonical tip succeeds; non-canonical or stale parents must miss.
let s = state_db.boxed_clone_canon(&h3a);
assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5));
let s = state_db.boxed_clone_canon(&h1a);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h2b);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h1b);
assert!(s.get_cached_account(&address).is_none());
// reorg to 3b
// blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
let mut s = state_db.boxed_clone_canon(&h2b);
s.journal_under(&mut batch, 3, &h3b).unwrap();
s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true);
// After the reorg the previously-canonical 3a chain must no longer serve cached data.
let s = state_db.boxed_clone_canon(&h3a);
assert!(s.get_cached_account(&address).is_none());
}
}
| load_bloom | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `webrender_api` crate contains an assortment of types and functions used
//! by WebRender consumers as well as, in many cases, WebRender itself.
//!
//! This separation allows Servo to parallelize compilation across `webrender`
//! and other crates that depend on `webrender_api`. So in practice, we put
//! things in this crate when Servo needs to use them. Firefox depends on the
//! `webrender` crate directly, and so this distinction is not really relevant
//! there.
#![cfg_attr(feature = "nightly", feature(nonzero))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::float_cmp, clippy::too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal, clippy::new_without_default))]
pub extern crate crossbeam_channel;
pub extern crate euclid;
extern crate app_units;
#[macro_use]
extern crate bitflags;
extern crate byteorder;
#[cfg(feature = "nightly")]
extern crate core;
extern crate derive_more;
#[macro_use]
extern crate malloc_size_of_derive;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
extern crate malloc_size_of;
extern crate peek_poke;
pub mod channel;
mod color;
mod display_item;
mod display_item_cache;
mod display_list;
mod font;
mod gradient_builder;
mod image;
pub mod units;
pub use crate::color::*;
pub use crate::display_item::*;
pub use crate::display_item_cache::DisplayItemCache;
pub use crate::display_list::*;
pub use crate::font::*;
pub use crate::gradient_builder::*;
pub use crate::image::*;
use crate::units::*;
use crate::channel::Receiver;
use std::marker::PhantomData;
use std::sync::Arc;
use std::os::raw::c_void;
use peek_poke::PeekPoke;
/// Defined here for cbindgen
pub const MAX_RENDER_TASK_SIZE: i32 = 16384;
/// Width and height in device pixels of image tiles.
pub type TileSize = u16;
/// Various settings that the caller can select based on desired tradeoffs
/// between rendering quality and performance / power usage.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct QualitySettings {
/// If true, disable creating separate picture cache slices when the
/// scroll root changes. This gives maximum opportunity to find an
/// opaque background, which enables subpixel AA. However, it is
/// usually significantly more expensive to render when scrolling.
pub force_subpixel_aa_where_possible: bool,
}
impl Default for QualitySettings {
fn default() -> Self {
QualitySettings {
// Prefer performance over maximum subpixel AA quality, since WR
// already enables subpixel AA in more situations than other browsers.
force_subpixel_aa_where_possible: false,
}
}
}
/// An epoch identifies the state of a pipeline in time.
///
/// This is mostly used as a synchronization mechanism to observe how/when particular pipeline
/// updates propagate through WebRender and are applied at various stages.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Epoch(pub u32);
impl Epoch {
/// Magic invalid epoch value.
pub fn invalid() -> Epoch {
Epoch(u32::MAX)
}
}
/// ID namespaces uniquely identify different users of WebRender's API.
///
/// For example in Gecko each content process uses a separate id namespace.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Eq, MallocSizeOf, PartialEq, Hash, Ord, PartialOrd, PeekPoke)]
#[derive(Deserialize, Serialize)]
pub struct IdNamespace(pub u32);
/// A key uniquely identifying a WebRender document.
///
/// Instances can manage one or several documents (using the same render backend thread).
/// Each document will internally correspond to a single scene, and scenes are made of
/// one or several pipelines.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct DocumentId {
///
pub namespace_id: IdNamespace,
///
pub id: u32,
}
impl DocumentId {
/// Constructor combining the namespace of the API user with a per-namespace id.
pub fn new(namespace_id: IdNamespace, id: u32) -> Self {
DocumentId {
namespace_id,
id,
}
}
/// Sentinel document id (namespace 0, id 0) denoting "no document".
pub const INVALID: DocumentId = DocumentId { namespace_id: IdNamespace(0), id: 0 };
}
/// This type carries no valuable semantics for WR. However, it reflects the fact that
/// clients (Servo) may generate pipelines by different semi-independent sources.
/// These pipelines still belong to the same `IdNamespace` and the same `DocumentId`.
/// Having this extra Id field enables them to generate `PipelineId` without collision.
pub type PipelineSourceId = u32;
/// From the point of view of WR, `PipelineId` is completely opaque and generic as long as
/// it's clonable, serializable, comparable, and hashable.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PipelineId(pub PipelineSourceId, pub u32);
impl Default for PipelineId {
fn default() -> Self {
PipelineId::dummy()
}
}
impl PipelineId {
    /// Returns a placeholder pipeline id with all bits set, used where no
    /// real pipeline applies (also the `Default`).
    pub fn dummy() -> Self {
        PipelineId(u32::MAX, u32::MAX)
    }
}
/// An opaque pointer-sized value.
#[repr(C)]
#[derive(Clone)]
pub struct ExternalEvent {
raw: usize,
}
unsafe impl Send for ExternalEvent {}
impl ExternalEvent {
/// Creates the event from an opaque pointer-sized value.
pub fn from_raw(raw: usize) -> Self {
ExternalEvent { raw }
}
/// Consumes self to make it obvious that the event should be forwarded only once.
pub fn unwrap(self) -> usize {
self.raw
}
}
pub type APZScrollGeneration = u64;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct SampledScrollOffset {
pub offset: LayoutVector2D,
pub generation: APZScrollGeneration,
}
/// A flag in each scrollable frame to represent whether the owner of the frame document
/// has any scroll-linked effect.
/// See https://firefox-source-docs.mozilla.org/performance/scroll-linked_effects.html
/// for a definition of scroll-linked effect.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, PeekPoke)]
pub enum HasScrollLinkedEffect {
Yes,
No,
}
impl Default for HasScrollLinkedEffect {
fn default() -> Self {
HasScrollLinkedEffect::No
}
}
/// A handler to integrate WebRender with the thread that contains the `Renderer`.
pub trait RenderNotifier: Send {
///
fn clone(&self) -> Box<dyn RenderNotifier>;
/// Wake the thread containing the `Renderer` up (after updates have been put
/// in the renderer's queue).
fn wake_up(
&self,
composite_needed: bool,
);
/// Notify the thread containing the `Renderer` that a new frame is ready.
fn new_frame_ready(&self, _: DocumentId, scrolled: bool, composite_needed: bool, render_time_ns: Option<u64>);
/// A Gecko-specific notification mechanism to get some code executed on the
/// `Renderer`'s thread, mostly replaced by `NotificationHandler`. You should
/// probably use the latter instead.
fn external_event(&self, _evt: ExternalEvent) {
unimplemented!()
}
/// Notify the thread containing the `Renderer` that the render backend has been
/// shut down.
fn shut_down(&self) {}
}
/// A stage of the rendering pipeline.
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Checkpoint {
///
SceneBuilt,
///
FrameBuilt,
///
FrameTexturesUpdated,
///
FrameRendered,
/// NotificationRequests get notified with this if they get dropped without having been
/// notified. This provides the guarantee that if a request is created it will get notified.
TransactionDropped,
}
/// A handler to notify when a transaction reaches certain stages of the rendering
/// pipeline.
pub trait NotificationHandler : Send + Sync {
/// Entry point of the handler to implement. Invoked by WebRender.
fn notify(&self, when: Checkpoint);
}
/// A request to notify a handler when the transaction reaches certain stages of the
/// rendering pipeline.
///
/// The request is guaranteed to be notified once and only once, even if the transaction
/// is dropped before the requested check-point.
pub struct NotificationRequest {
handler: Option<Box<dyn NotificationHandler>>,
when: Checkpoint,
}
impl NotificationRequest {
/// Constructor.
pub fn new(when: Checkpoint, handler: Box<dyn NotificationHandler>) -> Self {
NotificationRequest {
handler: Some(handler),
when,
}
}
/// The specified stage at which point the handler should be notified.
pub fn when(&self) -> Checkpoint { self.when }
/// Called by WebRender at specified stages to notify the registered handler.
pub fn notify(mut self) {
// Taking the handler out of the Option ensures the Drop impl cannot fire
// a second `TransactionDropped` notification after a successful notify.
if let Some(handler) = self.handler.take() {
handler.notify(self.when);
}
}
}
/// An object that can perform hit-testing without doing synchronous queries to
/// the RenderBackendThread.
pub trait ApiHitTester: Send + Sync {
/// Does a hit test on display items in the specified document, at the given
/// point. The vector of hit results will contain all display items that match,
/// ordered from front to back.
fn hit_test(&self, point: WorldPoint) -> HitTestResult;
}
/// A hit tester requested to the render backend thread but not necessarily ready yet.
///
/// The request should be resolved as late as possible to reduce the likelihood of blocking.
pub struct HitTesterRequest {
#[doc(hidden)]
pub rx: Receiver<Arc<dyn ApiHitTester>>,
}
impl HitTesterRequest {
/// Block until the hit tester is available and return it, consuming teh request.
pub fn | (self) -> Arc<dyn ApiHitTester> {
self.rx.recv().unwrap()
}
}
/// Describe an item that matched a hit-test query.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct HitTestItem {
/// The pipeline that the display item that was hit belongs to.
pub pipeline: PipelineId,
/// The tag of the hit display item.
pub tag: ItemTag,
}
/// Returned by `RenderApi::hit_test`.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct HitTestResult {
/// List of items that are match the hit-test query.
pub items: Vec<HitTestItem>,
}
impl Drop for NotificationRequest {
fn drop(&mut self) {
// If the request was never notified (the handler is still present),
// guarantee the handler learns the transaction was dropped. This upholds
// the "notified once and only once" contract documented on the type.
if let Some(ref mut handler) = self.handler {
handler.notify(Checkpoint::TransactionDropped);
}
}
}
// This Clone impl yields an "empty" request because we don't want the requests
// to be notified twice so the request is owned by only one of the API messages
// (the original one) after the clone.
// This works in practice because the notifications requests are used for
// synchronization so we don't need to include them in the recording mechanism
// in wrench that clones the messages.
impl Clone for NotificationRequest {
fn clone(&self) -> Self {
NotificationRequest {
when: self.when,
handler: None,
}
}
}
/// A key to identify an animated property binding.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, MallocSizeOf, PartialEq, Serialize, Eq, Hash, PeekPoke)]
pub struct PropertyBindingId {
pub namespace: IdNamespace,
pub uid: u32,
}
impl PropertyBindingId {
    /// Builds a binding id from a packed 64-bit value: the high 32 bits hold
    /// the namespace, the low 32 bits the per-namespace uid.
    pub fn new(value: u64) -> Self {
        let namespace = IdNamespace((value >> 32) as u32);
        let uid = value as u32;
        PropertyBindingId { namespace, uid }
    }
}
/// A unique key that is used for connecting animated property
/// values to bindings in the display list.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PropertyBindingKey<T> {
///
pub id: PropertyBindingId,
#[doc(hidden)]
pub _phantom: PhantomData<T>,
}
/// Construct a property value from a given key and value.
impl<T: Copy> PropertyBindingKey<T> {
///
pub fn with(self, value: T) -> PropertyValue<T> {
PropertyValue { key: self, value }
}
}
impl<T> PropertyBindingKey<T> {
/// Constructor.
pub fn new(value: u64) -> Self {
PropertyBindingKey {
id: PropertyBindingId::new(value),
_phantom: PhantomData,
}
}
}
/// A binding property can either be a specific value
/// (the normal, non-animated case) or point to a binding location
/// to fetch the current value from.
/// Note that Binding has also a non-animated value, the value is
/// used for the case where the animation is still in-delay phase
/// (i.e. the animation doesn't produce any animation values).
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub enum PropertyBinding<T> {
/// Non-animated value.
Value(T),
/// Animated binding.
Binding(PropertyBindingKey<T>, T),
}
impl<T: Default> Default for PropertyBinding<T> {
fn default() -> Self {
PropertyBinding::Value(Default::default())
}
}
impl<T> From<T> for PropertyBinding<T> {
fn from(value: T) -> PropertyBinding<T> {
PropertyBinding::Value(value)
}
}
impl From<PropertyBindingKey<ColorF>> for PropertyBindingKey<ColorU> {
fn from(key: PropertyBindingKey<ColorF>) -> PropertyBindingKey<ColorU> {
PropertyBindingKey {
id: key.id.clone(),
_phantom: PhantomData,
}
}
}
impl From<PropertyBindingKey<ColorU>> for PropertyBindingKey<ColorF> {
fn from(key: PropertyBindingKey<ColorU>) -> PropertyBindingKey<ColorF> {
PropertyBindingKey {
id: key.id.clone(),
_phantom: PhantomData,
}
}
}
impl From<PropertyBinding<ColorF>> for PropertyBinding<ColorU> {
fn from(value: PropertyBinding<ColorF>) -> PropertyBinding<ColorU> {
match value {
PropertyBinding::Value(value) => PropertyBinding::Value(value.into()),
PropertyBinding::Binding(k, v) => {
PropertyBinding::Binding(k.into(), v.into())
}
}
}
}
impl From<PropertyBinding<ColorU>> for PropertyBinding<ColorF> {
fn from(value: PropertyBinding<ColorU>) -> PropertyBinding<ColorF> {
match value {
PropertyBinding::Value(value) => PropertyBinding::Value(value.into()),
PropertyBinding::Binding(k, v) => {
PropertyBinding::Binding(k.into(), v.into())
}
}
}
}
/// The current value of an animated property. This is
/// supplied by the calling code.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
pub struct PropertyValue<T> {
///
pub key: PropertyBindingKey<T>,
///
pub value: T,
}
/// When using `generate_frame()`, a list of `PropertyValue` structures
/// can optionally be supplied to provide the current value of any
/// animated properties.
#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Default)]
pub struct DynamicProperties {
/// transform list
pub transforms: Vec<PropertyValue<LayoutTransform>>,
/// opacity
pub floats: Vec<PropertyValue<f32>>,
/// background color
pub colors: Vec<PropertyValue<ColorF>>,
}
impl DynamicProperties {
/// Extend the properties.
pub fn extend(&mut self, other: Self) {
self.transforms.extend(other.transforms);
self.floats.extend(other.floats);
self.colors.extend(other.colors);
}
}
/// A C function that takes a pointer to a heap allocation and returns its size.
///
/// This is borrowed from the malloc_size_of crate, upon which we want to avoid
/// a dependency from WebRender.
pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
/// A configuration option that can be changed at runtime.
///
/// # Adding a new configuration option
///
/// - Add a new enum variant here.
/// - Add the entry in WR_BOOL_PARAMETER_LIST in gfxPlatform.cpp.
/// - React to the parameter change anywhere in WebRender where a SetParam message is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Parameter {
Bool(BoolParameter, bool),
Int(IntParameter, i32),
}
/// Boolean configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum BoolParameter {
PboUploads = 0,
Multithreading = 1,
BatchedUploads = 2,
DrawCallsForTextureCopy = 3,
}
/// Integer configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum IntParameter {
BatchedUploadThreshold = 0,
}
bitflags! {
/// Flags to track why we are rendering.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct RenderReasons: u32 {
/// Equivalent of empty() for the C++ side.
const NONE = 0;
const SCENE = 1 << 0;
const ANIMATED_PROPERTY = 1 << 1;
const RESOURCE_UPDATE = 1 << 2;
const ASYNC_IMAGE = 1 << 3;
const CLEAR_RESOURCES = 1 << 4;
const APZ = 1 << 5;
/// Window resize
const RESIZE = 1 << 6;
/// Various widget-related reasons
const WIDGET = 1 << 7;
/// See Frame::must_be_drawn
const TEXTURE_CACHE_FLUSH = 1 << 8;
const SNAPSHOT = 1 << 9;
const POST_RESOURCE_UPDATES_HOOK = 1 << 10;
const CONFIG_CHANGE = 1 << 11;
const CONTENT_SYNC = 1 << 12;
const FLUSH = 1 << 13;
const TESTING = 1 << 14;
const OTHER = 1 << 15;
/// Vsync isn't actually "why" we render but it can be useful
/// to see which frames were driven by the vsync scheduler so
/// we store a bit for it.
const VSYNC = 1 << 16;
}
}
impl RenderReasons {
pub const NUM_BITS: u32 = 17;
}
bitflags! {
/// Flags to enable/disable various builtin debugging tools.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct DebugFlags: u32 {
/// Display the frame profiler on screen.
const PROFILER_DBG = 1 << 0;
/// Display intermediate render targets on screen.
const RENDER_TARGET_DBG = 1 << 1;
/// Display all texture cache pages on screen.
const TEXTURE_CACHE_DBG = 1 << 2;
/// Display GPU timing results.
const GPU_TIME_QUERIES = 1 << 3;
/// Query the number of pixels that pass the depth test divided and show it
/// in the profiler as a percentage of the number of pixels in the screen
/// (window width times height).
const GPU_SAMPLE_QUERIES = 1 << 4;
/// Render each quad with their own draw call.
///
/// Terrible for performance but can help with understanding the drawing
/// order when inspecting renderdoc or apitrace recordings.
const DISABLE_BATCHING = 1 << 5;
/// Display the pipeline epochs.
const EPOCHS = 1 << 6;
/// Print driver messages to stdout.
const ECHO_DRIVER_MESSAGES = 1 << 7;
/// Show an overlay displaying overdraw amount.
const SHOW_OVERDRAW = 1 << 8;
/// Display the contents of GPU cache.
const GPU_CACHE_DBG = 1 << 9;
/// Clear evicted parts of the texture cache for debugging purposes.
const TEXTURE_CACHE_DBG_CLEAR_EVICTED = 1 << 10;
/// Show picture caching debug overlay
const PICTURE_CACHING_DBG = 1 << 11;
/// Highlight all primitives with colors based on kind.
const PRIMITIVE_DBG = 1 << 12;
/// Draw a zoom widget showing part of the framebuffer zoomed in.
const ZOOM_DBG = 1 << 13;
/// Scale the debug renderer down for a smaller screen. This will disrupt
/// any mapping between debug display items and page content, so shouldn't
/// be used with overlays like the picture caching or primitive display.
const SMALL_SCREEN = 1 << 14;
/// Disable various bits of the WebRender pipeline, to help narrow
/// down where slowness might be coming from.
const DISABLE_OPAQUE_PASS = 1 << 15;
///
const DISABLE_ALPHA_PASS = 1 << 16;
///
const DISABLE_CLIP_MASKS = 1 << 17;
///
const DISABLE_TEXT_PRIMS = 1 << 18;
///
const DISABLE_GRADIENT_PRIMS = 1 << 19;
///
const OBSCURE_IMAGES = 1 << 20;
/// Taint the transparent area of the glyphs with a random opacity to easily
/// see when glyphs are re-rasterized.
const GLYPH_FLASHING = 1 << 21;
/// The profiler only displays information that is out of the ordinary.
const SMART_PROFILER = 1 << 22;
/// If set, dump picture cache invalidation debug to console.
const INVALIDATION_DBG = 1 << 23;
/// Collect and dump profiler statistics to captures.
const PROFILER_CAPTURE = (1 as u32) << 25; // need "as u32" until we have cbindgen#556
/// Invalidate picture tiles every frames (useful when inspecting GPU work in external tools).
const FORCE_PICTURE_INVALIDATION = (1 as u32) << 26;
/// Display window visibility on screen.
const WINDOW_VISIBILITY_DBG = 1 << 27;
}
}
/// Information specific to a primitive type that
/// uniquely identifies a primitive template by key.
#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash, Serialize, Deserialize)]
pub enum PrimitiveKeyKind {
/// Clear an existing rect, used for special effects on some platforms.
Clear,
///
Rectangle {
///
color: PropertyBinding<ColorU>,
},
}
///
#[derive(Clone, Copy, Debug)]
pub enum ScrollLocation {
/// Scroll by a certain amount.
Delta(LayoutVector2D),
/// Scroll to very top of element.
Start,
/// Scroll to very bottom of element.
End,
}
/// Crash annotations included in crash reports.
#[repr(C)]
#[derive(Clone, Copy)]
pub enum CrashAnnotation {
CompileShader = 0,
DrawShader = 1,
}
/// Handler to expose support for annotating crash reports.
pub trait CrashAnnotator : Send {
fn set(&self, annotation: CrashAnnotation, value: &std::ffi::CStr);
fn clear(&self, annotation: CrashAnnotation);
fn box_clone(&self) -> Box<dyn CrashAnnotator>;
}
impl Clone for Box<dyn CrashAnnotator> {
fn clone(&self) -> Box<dyn CrashAnnotator> {
self.box_clone()
}
}
/// Guard to add a crash annotation at creation, and clear it at destruction.
pub struct CrashAnnotatorGuard<'a> {
annotator: &'a Option<Box<dyn CrashAnnotator>>,
annotation: CrashAnnotation,
}
impl<'a> CrashAnnotatorGuard<'a> {
pub fn new(
annotator: &'a Option<Box<dyn CrashAnnotator>>,
annotation: CrashAnnotation,
value: &std::ffi::CStr,
) -> Self {
if let Some(ref annotator) = annotator {
annotator.set(annotation, value);
}
Self {
annotator,
annotation,
}
}
}
impl<'a> Drop for CrashAnnotatorGuard<'a> {
fn drop(&mut self) {
if let Some(ref annotator) = self.annotator {
annotator.clear(self.annotation);
}
}
}
| resolve | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `webrender_api` crate contains an assortment of types and functions used
//! by WebRender consumers as well as, in many cases, WebRender itself.
//!
//! This separation allows Servo to parallelize compilation across `webrender`
//! and other crates that depend on `webrender_api`. So in practice, we put
//! things in this crate when Servo needs to use them. Firefox depends on the
//! `webrender` crate directly, and so this distinction is not really relevant
//! there.
#![cfg_attr(feature = "nightly", feature(nonzero))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::float_cmp, clippy::too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal, clippy::new_without_default))]
pub extern crate crossbeam_channel;
pub extern crate euclid;
extern crate app_units;
#[macro_use]
extern crate bitflags;
extern crate byteorder;
#[cfg(feature = "nightly")]
extern crate core;
extern crate derive_more;
#[macro_use]
extern crate malloc_size_of_derive;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
extern crate malloc_size_of;
extern crate peek_poke;
pub mod channel;
mod color;
mod display_item;
mod display_item_cache;
mod display_list;
mod font;
mod gradient_builder;
mod image;
pub mod units;
pub use crate::color::*;
pub use crate::display_item::*;
pub use crate::display_item_cache::DisplayItemCache;
pub use crate::display_list::*;
pub use crate::font::*;
pub use crate::gradient_builder::*;
pub use crate::image::*;
use crate::units::*;
use crate::channel::Receiver;
use std::marker::PhantomData;
use std::sync::Arc;
use std::os::raw::c_void;
use peek_poke::PeekPoke;
/// Defined here for cbindgen
pub const MAX_RENDER_TASK_SIZE: i32 = 16384;
/// Width and height in device pixels of image tiles.
pub type TileSize = u16;
/// Various settings that the caller can select based on desired tradeoffs
/// between rendering quality and performance / power usage.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct QualitySettings {
/// If true, disable creating separate picture cache slices when the
/// scroll root changes. This gives maximum opportunity to find an
/// opaque background, which enables subpixel AA. However, it is
/// usually significantly more expensive to render when scrolling.
pub force_subpixel_aa_where_possible: bool,
}
impl Default for QualitySettings {
fn default() -> Self {
QualitySettings {
// Prefer performance over maximum subpixel AA quality, since WR
// already enables subpixel AA in more situations than other browsers.
force_subpixel_aa_where_possible: false,
}
}
}
/// An epoch identifies the state of a pipeline in time.
///
/// This is mostly used as a synchronization mechanism to observe how/when particular pipeline
/// updates propagate through WebRender and are applied at various stages.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Epoch(pub u32);
impl Epoch {
    /// Magic invalid epoch value.
    pub fn invalid() -> Epoch {
        // All bits set (== u32::MAX): reserved as the "invalid" sentinel.
        Epoch(!0u32)
    }
}
/// ID namespaces uniquely identify different users of WebRender's API.
///
/// For example in Gecko each content process uses a separate id namespace.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Eq, MallocSizeOf, PartialEq, Hash, Ord, PartialOrd, PeekPoke)]
#[derive(Deserialize, Serialize)]
pub struct IdNamespace(pub u32);
/// A key uniquely identifying a WebRender document.
///
/// Instances can manage one or several documents (using the same render backend thread).
/// Each document will internally correspond to a single scene, and scenes are made of
/// one or several pipelines.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct DocumentId {
///
pub namespace_id: IdNamespace,
///
pub id: u32,
}
impl DocumentId {
    /// Creates a document id from a namespace id and a per-namespace counter value.
    pub fn new(namespace_id: IdNamespace, id: u32) -> Self {
        DocumentId {
            namespace_id,
            id,
        }
    }
    /// Sentinel document id (namespace 0, id 0).
    pub const INVALID: DocumentId = DocumentId { namespace_id: IdNamespace(0), id: 0 };
}
/// This type carries no valuable semantics for WR. However, it reflects the fact that
/// clients (Servo) may generate pipelines by different semi-independent sources.
/// These pipelines still belong to the same `IdNamespace` and the same `DocumentId`.
/// Having this extra Id field enables them to generate `PipelineId` without collision.
pub type PipelineSourceId = u32;
/// From the point of view of WR, `PipelineId` is completely opaque and generic as long as
/// it's clonable, serializable, comparable, and hashable.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PipelineId(pub PipelineSourceId, pub u32);
impl Default for PipelineId {
    fn default() -> Self {
        // The default pipeline id is the dummy (all-bits-set) id.
        PipelineId::dummy()
    }
}
impl PipelineId {
    /// Placeholder pipeline id with all bits set in both fields.
    pub fn dummy() -> Self {
        PipelineId(!0,!0)
    }
}
/// An opaque pointer-sized value.
#[repr(C)]
#[derive(Clone)]
pub struct ExternalEvent {
raw: usize,
}
unsafe impl Send for ExternalEvent {}
impl ExternalEvent {
/// Creates the event from an opaque pointer-sized value.
pub fn from_raw(raw: usize) -> Self {
ExternalEvent { raw }
}
/// Consumes self to make it obvious that the event should be forwarded only once.
pub fn unwrap(self) -> usize {
self.raw
}
}
/// Generation counter used to tag sampled scroll offsets.
pub type APZScrollGeneration = u64;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct SampledScrollOffset {
    /// The sampled scroll offset, in layout coordinates.
    pub offset: LayoutVector2D,
    /// The scroll generation this offset was sampled at.
    pub generation: APZScrollGeneration,
}
/// A flag in each scrollable frame to represent whether the owner of the frame document
/// has any scroll-linked effect.
/// See https://firefox-source-docs.mozilla.org/performance/scroll-linked_effects.html
/// for a definition of scroll-linked effect.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, PeekPoke)]
pub enum HasScrollLinkedEffect {
Yes,
No,
}
impl Default for HasScrollLinkedEffect {
    fn default() -> Self {
        // Assume no scroll-linked effect until told otherwise.
        HasScrollLinkedEffect::No
    }
}
/// A handler to integrate WebRender with the thread that contains the `Renderer`.
pub trait RenderNotifier: Send {
    /// Object-safe clone hook returning a boxed copy of this notifier.
    fn clone(&self) -> Box<dyn RenderNotifier>;
    /// Wake the thread containing the `Renderer` up (after updates have been put
    /// in the renderer's queue).
    fn wake_up(
        &self,
        composite_needed: bool,
    );
    /// Notify the thread containing the `Renderer` that a new frame is ready.
    fn new_frame_ready(&self, _: DocumentId, scrolled: bool, composite_needed: bool, render_time_ns: Option<u64>);
    /// A Gecko-specific notification mechanism to get some code executed on the
    /// `Renderer`'s thread, mostly replaced by `NotificationHandler`. You should
    /// probably use the latter instead.
    fn external_event(&self, _evt: ExternalEvent) {
        // Default: unsupported unless the embedder overrides this method.
        unimplemented!()
    }
    /// Notify the thread containing the `Renderer` that the render backend has been
    /// shut down.
    fn shut_down(&self) {}
}
/// A stage of the rendering pipeline.
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Checkpoint {
///
SceneBuilt,
///
FrameBuilt,
///
FrameTexturesUpdated,
///
FrameRendered,
/// NotificationRequests get notified with this if they get dropped without having been
/// notified. This provides the guarantee that if a request is created it will get notified.
TransactionDropped,
}
/// A handler to notify when a transaction reaches certain stages of the rendering
/// pipeline.
pub trait NotificationHandler : Send + Sync {
/// Entry point of the handler to implement. Invoked by WebRender.
fn notify(&self, when: Checkpoint);
}
/// A request to notify a handler when the transaction reaches certain stages of the
/// rendering pipeline.
///
/// The request is guaranteed to be notified once and only once, even if the transaction
/// is dropped before the requested check-point.
pub struct NotificationRequest {
handler: Option<Box<dyn NotificationHandler>>,
when: Checkpoint,
}
impl NotificationRequest {
    /// Creates a request that notifies `handler` once checkpoint `when` is reached.
    pub fn new(when: Checkpoint, handler: Box<dyn NotificationHandler>) -> Self {
        NotificationRequest { when, handler: Some(handler) }
    }

    /// The specified stage at which point the handler should be notified.
    pub fn when(&self) -> Checkpoint {
        self.when
    }

    /// Called by WebRender at specified stages to notify the registered handler.
    ///
    /// Taking the handler out first prevents the `Drop` impl from firing a
    /// second `TransactionDropped` notification afterwards.
    pub fn notify(mut self) {
        match self.handler.take() {
            Some(handler) => handler.notify(self.when),
            None => {}
        }
    }
}
/// An object that can perform hit-testing without doing synchronous queries to
/// the RenderBackendThread.
pub trait ApiHitTester: Send + Sync {
/// Does a hit test on display items in the specified document, at the given
/// point. The vector of hit results will contain all display items that match,
/// ordered from front to back.
fn hit_test(&self, point: WorldPoint) -> HitTestResult;
}
/// A hit tester requested to the render backend thread but not necessarily ready yet.
///
/// The request should be resolved as late as possible to reduce the likelihood of blocking.
pub struct HitTesterRequest {
#[doc(hidden)]
pub rx: Receiver<Arc<dyn ApiHitTester>>,
}
impl HitTesterRequest {
    /// Block until the hit tester is available and return it, consuming the request.
    ///
    /// Panics if the sending side disconnects before delivering the hit tester.
    pub fn resolve(self) -> Arc<dyn ApiHitTester> {
        self.rx.recv().unwrap()
    }
}
/// Describe an item that matched a hit-test query.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct HitTestItem {
/// The pipeline that the display item that was hit belongs to.
pub pipeline: PipelineId,
/// The tag of the hit display item.
pub tag: ItemTag,
}
/// Returned by `RenderApi::hit_test`.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct HitTestResult {
/// List of items that are match the hit-test query.
pub items: Vec<HitTestItem>,
}
impl Drop for NotificationRequest {
    fn drop(&mut self) {
        // If the request was never explicitly notified (handler still present),
        // fire TransactionDropped so the "notified exactly once" guarantee
        // documented on NotificationRequest holds.
        if let Some(ref mut handler) = self.handler {
            handler.notify(Checkpoint::TransactionDropped);
        }
    }
}
// This Clone impl yields an "empty" request because we don't want the requests
// to be notified twice so the request is owned by only one of the API messages
// (the original one) after the clone.
// This works in practice because the notifications requests are used for
// synchronization so we don't need to include them in the recording mechanism
// in wrench that clones the messages.
impl Clone for NotificationRequest {
    fn clone(&self) -> Self {
        // Deliberately drop the handler: only the original request may notify
        // (see the comment above this impl).
        let when = self.when;
        NotificationRequest { handler: None, when }
    }
}
/// A key to identify an animated property binding.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, MallocSizeOf, PartialEq, Serialize, Eq, Hash, PeekPoke)]
pub struct PropertyBindingId {
    /// Id namespace the binding belongs to (high 32 bits of the packed u64).
    pub namespace: IdNamespace,
    /// Unique id of the binding within its namespace (low 32 bits).
    pub uid: u32,
}
impl PropertyBindingId {
    /// Builds an id from a packed u64: high 32 bits are the namespace,
    /// low 32 bits the per-namespace uid.
    pub fn new(value: u64) -> Self {
        let namespace = IdNamespace((value >> 32) as u32);
        let uid = value as u32;
        PropertyBindingId { namespace, uid }
    }
}
/// A unique key that is used for connecting animated property
/// values to bindings in the display list.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PropertyBindingKey<T> {
///
pub id: PropertyBindingId,
#[doc(hidden)]
pub _phantom: PhantomData<T>,
}
/// Construct a property value from a given key and value.
impl<T: Copy> PropertyBindingKey<T> {
    /// Pairs this key with a concrete `value`, yielding a `PropertyValue<T>`.
    pub fn with(self, value: T) -> PropertyValue<T> {
        PropertyValue { key: self, value }
    }
}
impl<T> PropertyBindingKey<T> {
    /// Constructor; `value` packs the namespace in the high 32 bits and the
    /// uid in the low 32 bits (see `PropertyBindingId::new`).
    pub fn new(value: u64) -> Self {
        PropertyBindingKey {
            id: PropertyBindingId::new(value),
            _phantom: PhantomData,
        }
    }
}
/// A binding property can either be a specific value
/// (the normal, non-animated case) or point to a binding location
/// to fetch the current value from.
/// Note that Binding has also a non-animated value, the value is
/// used for the case where the animation is still in-delay phase
/// (i.e. the animation doesn't produce any animation values).
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub enum PropertyBinding<T> {
/// Non-animated value.
Value(T),
/// Animated binding.
Binding(PropertyBindingKey<T>, T),
}
impl<T: Default> Default for PropertyBinding<T> {
fn default() -> Self {
PropertyBinding::Value(Default::default())
}
}
impl<T> From<T> for PropertyBinding<T> {
fn from(value: T) -> PropertyBinding<T> {
PropertyBinding::Value(value)
}
}
impl From<PropertyBindingKey<ColorF>> for PropertyBindingKey<ColorU> {
    /// Reinterprets a float-color binding key as a u8-color binding key:
    /// the underlying id is unchanged, only the phantom value type differs.
    fn from(key: PropertyBindingKey<ColorF>) -> PropertyBindingKey<ColorU> {
        PropertyBindingKey {
            // `PropertyBindingId` is `Copy`; the previous `.clone()` was redundant.
            id: key.id,
            _phantom: PhantomData,
        }
    }
}
impl From<PropertyBindingKey<ColorU>> for PropertyBindingKey<ColorF> {
    /// Reinterprets a u8-color binding key as a float-color binding key:
    /// the underlying id is unchanged, only the phantom value type differs.
    fn from(key: PropertyBindingKey<ColorU>) -> PropertyBindingKey<ColorF> {
        PropertyBindingKey {
            // `PropertyBindingId` is `Copy`; the previous `.clone()` was redundant.
            id: key.id,
            _phantom: PhantomData,
        }
    }
}
impl From<PropertyBinding<ColorF>> for PropertyBinding<ColorU> {
    /// Converts the color payload(s) from `ColorF` to `ColorU`, preserving
    /// whether this is a plain value or an animated binding.
    fn from(value: PropertyBinding<ColorF>) -> PropertyBinding<ColorU> {
        match value {
            PropertyBinding::Value(value) => PropertyBinding::Value(value.into()),
            // This arm was truncated in the source ("=> |"); restored to mirror
            // the inverse ColorU -> ColorF conversion.
            PropertyBinding::Binding(k, v) => {
                PropertyBinding::Binding(k.into(), v.into())
            }
        }
    }
}
impl From<PropertyBinding<ColorU>> for PropertyBinding<ColorF> {
    /// Converts the color payload(s) from `ColorU` to `ColorF`, preserving
    /// whether this is a plain value or an animated binding.
    fn from(value: PropertyBinding<ColorU>) -> PropertyBinding<ColorF> {
        match value {
            PropertyBinding::Value(c) => PropertyBinding::Value(c.into()),
            PropertyBinding::Binding(key, c) => PropertyBinding::Binding(key.into(), c.into()),
        }
    }
}
/// The current value of an animated property. This is
/// supplied by the calling code.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
pub struct PropertyValue<T> {
///
pub key: PropertyBindingKey<T>,
///
pub value: T,
}
/// When using `generate_frame()`, a list of `PropertyValue` structures
/// can optionally be supplied to provide the current value of any
/// animated properties.
#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Default)]
pub struct DynamicProperties {
/// transform list
pub transforms: Vec<PropertyValue<LayoutTransform>>,
/// opacity
pub floats: Vec<PropertyValue<f32>>,
/// background color
pub colors: Vec<PropertyValue<ColorF>>,
}
impl DynamicProperties {
/// Extend the properties.
pub fn extend(&mut self, other: Self) {
self.transforms.extend(other.transforms);
self.floats.extend(other.floats);
self.colors.extend(other.colors);
}
}
/// A C function that takes a pointer to a heap allocation and returns its size.
///
/// This is borrowed from the malloc_size_of crate, upon which we want to avoid
/// a dependency from WebRender.
pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
/// A configuration option that can be changed at runtime.
///
/// # Adding a new configuration option
///
/// - Add a new enum variant here.
/// - Add the entry in WR_BOOL_PARAMETER_LIST in gfxPlatform.cpp.
/// - React to the parameter change anywhere in WebRender where a SetParam message is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Parameter {
    /// A boolean parameter together with its new value.
    Bool(BoolParameter, bool),
    /// An integer parameter together with its new value.
    Int(IntParameter, i32),
}
/// Boolean configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum BoolParameter {
PboUploads = 0,
Multithreading = 1,
BatchedUploads = 2,
DrawCallsForTextureCopy = 3,
}
/// Integer configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum IntParameter {
BatchedUploadThreshold = 0,
}
bitflags! {
/// Flags to track why we are rendering.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct RenderReasons: u32 {
/// Equivalent of empty() for the C++ side.
const NONE = 0;
const SCENE = 1 << 0;
const ANIMATED_PROPERTY = 1 << 1;
const RESOURCE_UPDATE = 1 << 2;
const ASYNC_IMAGE = 1 << 3;
const CLEAR_RESOURCES = 1 << 4;
const APZ = 1 << 5;
/// Window resize
const RESIZE = 1 << 6;
/// Various widget-related reasons
const WIDGET = 1 << 7;
/// See Frame::must_be_drawn
const TEXTURE_CACHE_FLUSH = 1 << 8;
const SNAPSHOT = 1 << 9;
const POST_RESOURCE_UPDATES_HOOK = 1 << 10;
const CONFIG_CHANGE = 1 << 11;
const CONTENT_SYNC = 1 << 12;
const FLUSH = 1 << 13;
const TESTING = 1 << 14;
const OTHER = 1 << 15;
/// Vsync isn't actually "why" we render but it can be useful
/// to see which frames were driven by the vsync scheduler so
/// we store a bit for it.
const VSYNC = 1 << 16;
}
}
impl RenderReasons {
    /// Number of bits used by the flags above (highest is VSYNC = 1 << 16).
    /// Kept in sync with the bitflags definition by hand.
    pub const NUM_BITS: u32 = 17;
}
bitflags! {
/// Flags to enable/disable various builtin debugging tools.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct DebugFlags: u32 {
/// Display the frame profiler on screen.
const PROFILER_DBG = 1 << 0;
/// Display intermediate render targets on screen.
const RENDER_TARGET_DBG = 1 << 1;
/// Display all texture cache pages on screen.
const TEXTURE_CACHE_DBG = 1 << 2;
/// Display GPU timing results.
const GPU_TIME_QUERIES = 1 << 3;
/// Query the number of pixels that pass the depth test divided and show it
/// in the profiler as a percentage of the number of pixels in the screen
/// (window width times height).
const GPU_SAMPLE_QUERIES = 1 << 4;
/// Render each quad with their own draw call.
///
/// Terrible for performance but can help with understanding the drawing
/// order when inspecting renderdoc or apitrace recordings.
const DISABLE_BATCHING = 1 << 5;
/// Display the pipeline epochs.
const EPOCHS = 1 << 6;
/// Print driver messages to stdout.
const ECHO_DRIVER_MESSAGES = 1 << 7;
/// Show an overlay displaying overdraw amount.
const SHOW_OVERDRAW = 1 << 8;
/// Display the contents of GPU cache.
const GPU_CACHE_DBG = 1 << 9;
/// Clear evicted parts of the texture cache for debugging purposes.
const TEXTURE_CACHE_DBG_CLEAR_EVICTED = 1 << 10;
/// Show picture caching debug overlay
const PICTURE_CACHING_DBG = 1 << 11;
/// Highlight all primitives with colors based on kind.
const PRIMITIVE_DBG = 1 << 12;
/// Draw a zoom widget showing part of the framebuffer zoomed in.
const ZOOM_DBG = 1 << 13;
/// Scale the debug renderer down for a smaller screen. This will disrupt
/// any mapping between debug display items and page content, so shouldn't
/// be used with overlays like the picture caching or primitive display.
const SMALL_SCREEN = 1 << 14;
/// Disable various bits of the WebRender pipeline, to help narrow
/// down where slowness might be coming from.
const DISABLE_OPAQUE_PASS = 1 << 15;
///
const DISABLE_ALPHA_PASS = 1 << 16;
///
const DISABLE_CLIP_MASKS = 1 << 17;
///
const DISABLE_TEXT_PRIMS = 1 << 18;
///
const DISABLE_GRADIENT_PRIMS = 1 << 19;
///
const OBSCURE_IMAGES = 1 << 20;
/// Taint the transparent area of the glyphs with a random opacity to easily
/// see when glyphs are re-rasterized.
const GLYPH_FLASHING = 1 << 21;
/// The profiler only displays information that is out of the ordinary.
const SMART_PROFILER = 1 << 22;
/// If set, dump picture cache invalidation debug to console.
const INVALIDATION_DBG = 1 << 23;
/// Collect and dump profiler statistics to captures.
const PROFILER_CAPTURE = (1 as u32) << 25; // need "as u32" until we have cbindgen#556
/// Invalidate picture tiles every frames (useful when inspecting GPU work in external tools).
const FORCE_PICTURE_INVALIDATION = (1 as u32) << 26;
/// Display window visibility on screen.
const WINDOW_VISIBILITY_DBG = 1 << 27;
}
}
/// Information specific to a primitive type that
/// uniquely identifies a primitive template by key.
#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash, Serialize, Deserialize)]
pub enum PrimitiveKeyKind {
/// Clear an existing rect, used for special effects on some platforms.
Clear,
///
Rectangle {
///
color: PropertyBinding<ColorU>,
},
}
///
#[derive(Clone, Copy, Debug)]
pub enum ScrollLocation {
/// Scroll by a certain amount.
Delta(LayoutVector2D),
/// Scroll to very top of element.
Start,
/// Scroll to very bottom of element.
End,
}
/// Crash annotations included in crash reports.
#[repr(C)]
#[derive(Clone, Copy)]
pub enum CrashAnnotation {
CompileShader = 0,
DrawShader = 1,
}
/// Handler to expose support for annotating crash reports.
pub trait CrashAnnotator : Send {
    /// Stores `value` under `annotation` in the crash report.
    fn set(&self, annotation: CrashAnnotation, value: &std::ffi::CStr);
    /// Removes any value stored under `annotation`.
    fn clear(&self, annotation: CrashAnnotation);
    /// Object-safe clone hook (used by the `Clone` impl for `Box<dyn CrashAnnotator>`).
    fn box_clone(&self) -> Box<dyn CrashAnnotator>;
}
impl Clone for Box<dyn CrashAnnotator> {
    fn clone(&self) -> Box<dyn CrashAnnotator> {
        // Delegate to the object-safe clone hook on the trait.
        self.box_clone()
    }
}
/// Guard to add a crash annotation at creation, and clear it at destruction.
pub struct CrashAnnotatorGuard<'a> {
annotator: &'a Option<Box<dyn CrashAnnotator>>,
annotation: CrashAnnotation,
}
impl<'a> CrashAnnotatorGuard<'a> {
    /// Sets `annotation` to `value` (when an annotator is present) and returns
    /// a guard that clears the annotation again when dropped.
    pub fn new(
        annotator: &'a Option<Box<dyn CrashAnnotator>>,
        annotation: CrashAnnotation,
        value: &std::ffi::CStr,
    ) -> Self {
        if let Some(ref annotator) = annotator {
            annotator.set(annotation, value);
        }
        Self {
            annotator,
            annotation,
        }
    }
}
impl<'a> Drop for CrashAnnotatorGuard<'a> {
    fn drop(&mut self) {
        // Clear the annotation set in `new` so it doesn't outlive the guarded scope.
        if let Some(ref annotator) = self.annotator {
            annotator.clear(self.annotation);
        }
    }
}
| {
PropertyBinding::Binding(k.into(), v.into())
} | conditional_block |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `webrender_api` crate contains an assortment types and functions used
//! by WebRender consumers as well as, in many cases, WebRender itself.
//!
//! This separation allows Servo to parallelize compilation across `webrender`
//! and other crates that depend on `webrender_api`. So in practice, we put
//! things in this crate when Servo needs to use them. Firefox depends on the
//! `webrender` crate directly, and so this distinction is not really relevant
//! there.
#![cfg_attr(feature = "nightly", feature(nonzero))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::float_cmp, clippy::too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal, clippy::new_without_default))]
pub extern crate crossbeam_channel;
pub extern crate euclid;
extern crate app_units;
#[macro_use]
extern crate bitflags;
extern crate byteorder;
#[cfg(feature = "nightly")]
extern crate core;
extern crate derive_more;
#[macro_use]
extern crate malloc_size_of_derive;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
extern crate malloc_size_of;
extern crate peek_poke;
pub mod channel;
mod color;
mod display_item;
mod display_item_cache;
mod display_list;
mod font;
mod gradient_builder;
mod image;
pub mod units;
pub use crate::color::*;
pub use crate::display_item::*;
pub use crate::display_item_cache::DisplayItemCache;
pub use crate::display_list::*;
pub use crate::font::*;
pub use crate::gradient_builder::*;
pub use crate::image::*;
use crate::units::*;
use crate::channel::Receiver;
use std::marker::PhantomData;
use std::sync::Arc;
use std::os::raw::c_void;
use peek_poke::PeekPoke;
/// Defined here for cbindgen
pub const MAX_RENDER_TASK_SIZE: i32 = 16384;
/// Width and height in device pixels of image tiles.
pub type TileSize = u16;
/// Various settings that the caller can select based on desired tradeoffs
/// between rendering quality and performance / power usage.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct QualitySettings {
/// If true, disable creating separate picture cache slices when the
/// scroll root changes. This gives maximum opportunity to find an
/// opaque background, which enables subpixel AA. However, it is
/// usually significantly more expensive to render when scrolling.
pub force_subpixel_aa_where_possible: bool,
}
impl Default for QualitySettings {
fn default() -> Self {
QualitySettings {
// Prefer performance over maximum subpixel AA quality, since WR
// already enables subpixel AA in more situations than other browsers.
force_subpixel_aa_where_possible: false,
}
}
}
/// An epoch identifies the state of a pipeline in time.
///
/// This is mostly used as a synchronization mechanism to observe how/when particular pipeline
/// updates propagate through WebRender and are applied at various stages.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Epoch(pub u32);
impl Epoch {
/// Magic invalid epoch value.
pub fn invalid() -> Epoch {
Epoch(u32::MAX)
}
}
/// ID namespaces uniquely identify different users of WebRender's API.
///
/// For example in Gecko each content process uses a separate id namespace.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Eq, MallocSizeOf, PartialEq, Hash, Ord, PartialOrd, PeekPoke)]
#[derive(Deserialize, Serialize)]
pub struct IdNamespace(pub u32);
/// A key uniquely identifying a WebRender document.
///
/// Instances can manage one or several documents (using the same render backend thread).
/// Each document will internally correspond to a single scene, and scenes are made of
/// one or several pipelines.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct DocumentId {
///
pub namespace_id: IdNamespace,
///
pub id: u32,
}
impl DocumentId {
///
pub fn new(namespace_id: IdNamespace, id: u32) -> Self {
DocumentId {
namespace_id,
id,
}
}
///
pub const INVALID: DocumentId = DocumentId { namespace_id: IdNamespace(0), id: 0 };
}
/// This type carries no valuable semantics for WR. However, it reflects the fact that
/// clients (Servo) may generate pipelines by different semi-independent sources.
/// These pipelines still belong to the same `IdNamespace` and the same `DocumentId`.
/// Having this extra Id field enables them to generate `PipelineId` without collision.
pub type PipelineSourceId = u32;
/// From the point of view of WR, `PipelineId` is completely opaque and generic as long as
/// it's clonable, serializable, comparable, and hashable.
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PipelineId(pub PipelineSourceId, pub u32);
impl Default for PipelineId {
fn default() -> Self {
PipelineId::dummy()
}
}
impl PipelineId {
///
pub fn dummy() -> Self {
PipelineId(!0,!0)
}
}
/// An opaque pointer-sized value.
#[repr(C)]
#[derive(Clone)]
pub struct ExternalEvent {
raw: usize,
}
unsafe impl Send for ExternalEvent {}
impl ExternalEvent {
/// Creates the event from an opaque pointer-sized value.
pub fn from_raw(raw: usize) -> Self {
ExternalEvent { raw }
}
/// Consumes self to make it obvious that the event should be forwarded only once.
pub fn unwrap(self) -> usize {
self.raw
}
}
pub type APZScrollGeneration = u64;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct SampledScrollOffset {
pub offset: LayoutVector2D,
pub generation: APZScrollGeneration,
}
/// A flag in each scrollable frame to represent whether the owner of the frame document
/// has any scroll-linked effect.
/// See https://firefox-source-docs.mozilla.org/performance/scroll-linked_effects.html
/// for a definition of scroll-linked effect.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize, PeekPoke)]
pub enum HasScrollLinkedEffect {
Yes,
No,
}
impl Default for HasScrollLinkedEffect {
fn default() -> Self {
HasScrollLinkedEffect::No
}
}
/// A handler to integrate WebRender with the thread that contains the `Renderer`.
pub trait RenderNotifier: Send {
///
fn clone(&self) -> Box<dyn RenderNotifier>;
/// Wake the thread containing the `Renderer` up (after updates have been put
/// in the renderer's queue).
fn wake_up(
&self,
composite_needed: bool,
);
/// Notify the thread containing the `Renderer` that a new frame is ready.
fn new_frame_ready(&self, _: DocumentId, scrolled: bool, composite_needed: bool, render_time_ns: Option<u64>);
/// A Gecko-specific notification mechanism to get some code executed on the
/// `Renderer`'s thread, mostly replaced by `NotificationHandler`. You should
/// probably use the latter instead.
fn external_event(&self, _evt: ExternalEvent) {
unimplemented!()
}
/// Notify the thread containing the `Renderer` that the render backend has been
/// shut down.
fn shut_down(&self) {}
}
/// A stage of the rendering pipeline.
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Checkpoint {
///
SceneBuilt,
///
FrameBuilt,
///
FrameTexturesUpdated,
///
FrameRendered,
/// NotificationRequests get notified with this if they get dropped without having been
/// notified. This provides the guarantee that if a request is created it will get notified.
TransactionDropped,
}
/// A handler to notify when a transaction reaches certain stages of the rendering
/// pipeline.
pub trait NotificationHandler : Send + Sync {
/// Entry point of the handler to implement. Invoked by WebRender.
fn notify(&self, when: Checkpoint);
}
/// A request to notify a handler when the transaction reaches certain stages of the
/// rendering pipeline.
///
/// The request is guaranteed to be notified once and only once, even if the transaction
/// is dropped before the requested check-point.
pub struct NotificationRequest {
handler: Option<Box<dyn NotificationHandler>>,
when: Checkpoint,
}
impl NotificationRequest {
/// Constructor.
pub fn new(when: Checkpoint, handler: Box<dyn NotificationHandler>) -> Self {
NotificationRequest {
handler: Some(handler),
when,
}
}
/// The specified stage at which point the handler should be notified.
pub fn when(&self) -> Checkpoint { self.when }
/// Called by WebRender at specified stages to notify the registered handler.
pub fn notify(mut self) {
if let Some(handler) = self.handler.take() {
handler.notify(self.when);
}
}
}
/// An object that can perform hit-testing without doing synchronous queries to
/// the RenderBackendThread.
pub trait ApiHitTester: Send + Sync {
/// Does a hit test on display items in the specified document, at the given
/// point. The vector of hit results will contain all display items that match,
/// ordered from front to back.
fn hit_test(&self, point: WorldPoint) -> HitTestResult;
}
/// A hit tester requested to the render backend thread but not necessarily ready yet.
///
/// The request should be resolved as late as possible to reduce the likelihood of blocking.
pub struct HitTesterRequest {
#[doc(hidden)]
pub rx: Receiver<Arc<dyn ApiHitTester>>,
}
impl HitTesterRequest {
/// Block until the hit tester is available and return it, consuming teh request.
pub fn resolve(self) -> Arc<dyn ApiHitTester> {
self.rx.recv().unwrap()
}
}
/// Describe an item that matched a hit-test query.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct HitTestItem {
/// The pipeline that the display item that was hit belongs to.
pub pipeline: PipelineId,
/// The tag of the hit display item.
pub tag: ItemTag,
}
/// Returned by `RenderApi::hit_test`.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct HitTestResult {
/// List of items that are match the hit-test query.
pub items: Vec<HitTestItem>,
}
impl Drop for NotificationRequest {
fn drop(&mut self) {
if let Some(ref mut handler) = self.handler {
handler.notify(Checkpoint::TransactionDropped);
}
}
}
// This Clone impl yields an "empty" request because we don't want the requests
// to be notified twice so the request is owned by only one of the API messages
// (the original one) after the clone.
// This works in practice because the notifications requests are used for
// synchronization so we don't need to include them in the recording mechanism
// in wrench that clones the messages.
impl Clone for NotificationRequest {
fn clone(&self) -> Self {
NotificationRequest {
when: self.when,
handler: None,
}
}
}
/// A key to identify an animated property binding.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, MallocSizeOf, PartialEq, Serialize, Eq, Hash, PeekPoke)]
pub struct PropertyBindingId {
pub namespace: IdNamespace,
pub uid: u32,
}
impl PropertyBindingId {
/// Constructor.
pub fn new(value: u64) -> Self {
PropertyBindingId {
namespace: IdNamespace((value >> 32) as u32),
uid: value as u32,
}
}
}
/// A unique key that is used for connecting animated property
/// values to bindings in the display list.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub struct PropertyBindingKey<T> {
///
pub id: PropertyBindingId,
#[doc(hidden)]
pub _phantom: PhantomData<T>,
}
/// Construct a property value from a given key and value.
impl<T: Copy> PropertyBindingKey<T> {
///
pub fn with(self, value: T) -> PropertyValue<T> {
PropertyValue { key: self, value }
}
}
impl<T> PropertyBindingKey<T> {
/// Constructor.
pub fn new(value: u64) -> Self {
PropertyBindingKey {
id: PropertyBindingId::new(value),
_phantom: PhantomData,
}
}
}
/// A binding property can either be a specific value
/// (the normal, non-animated case) or point to a binding location
/// to fetch the current value from.
/// Note that Binding has also a non-animated value, the value is
/// used for the case where the animation is still in-delay phase
/// (i.e. the animation doesn't produce any animation values).
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
pub enum PropertyBinding<T> {
/// Non-animated value.
Value(T),
/// Animated binding.
Binding(PropertyBindingKey<T>, T),
}
impl<T: Default> Default for PropertyBinding<T> {
fn default() -> Self {
PropertyBinding::Value(Default::default())
}
}
impl<T> From<T> for PropertyBinding<T> {
fn from(value: T) -> PropertyBinding<T> {
PropertyBinding::Value(value)
}
}
impl From<PropertyBindingKey<ColorF>> for PropertyBindingKey<ColorU> {
fn from(key: PropertyBindingKey<ColorF>) -> PropertyBindingKey<ColorU> {
PropertyBindingKey {
id: key.id.clone(),
_phantom: PhantomData,
}
}
}
impl From<PropertyBindingKey<ColorU>> for PropertyBindingKey<ColorF> {
fn from(key: PropertyBindingKey<ColorU>) -> PropertyBindingKey<ColorF> {
PropertyBindingKey {
id: key.id.clone(),
_phantom: PhantomData,
}
}
}
impl From<PropertyBinding<ColorF>> for PropertyBinding<ColorU> {
fn from(value: PropertyBinding<ColorF>) -> PropertyBinding<ColorU> {
match value {
PropertyBinding::Value(value) => PropertyBinding::Value(value.into()),
PropertyBinding::Binding(k, v) => {
PropertyBinding::Binding(k.into(), v.into())
}
}
}
}
impl From<PropertyBinding<ColorU>> for PropertyBinding<ColorF> {
fn from(value: PropertyBinding<ColorU>) -> PropertyBinding<ColorF> {
match value {
PropertyBinding::Value(value) => PropertyBinding::Value(value.into()),
PropertyBinding::Binding(k, v) => {
PropertyBinding::Binding(k.into(), v.into())
}
}
}
}
/// The current value of an animated property. This is
/// supplied by the calling code.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq)]
pub struct PropertyValue<T> {
///
pub key: PropertyBindingKey<T>,
///
pub value: T,
}
/// When using `generate_frame()`, a list of `PropertyValue` structures
/// can optionally be supplied to provide the current value of any
/// animated properties.
#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Default)]
pub struct DynamicProperties {
/// transform list
pub transforms: Vec<PropertyValue<LayoutTransform>>,
/// opacity
pub floats: Vec<PropertyValue<f32>>,
/// background color
pub colors: Vec<PropertyValue<ColorF>>,
}
impl DynamicProperties {
/// Extend the properties.
pub fn extend(&mut self, other: Self) {
self.transforms.extend(other.transforms);
self.floats.extend(other.floats);
self.colors.extend(other.colors);
}
}
/// A C function that takes a pointer to a heap allocation and returns its size.
///
/// This is borrowed from the malloc_size_of crate, upon which we want to avoid
/// a dependency from WebRender.
pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
/// A configuration option that can be changed at runtime.
///
/// # Adding a new configuration option
///
/// - Add a new enum variant here.
/// - Add the entry in WR_BOOL_PARAMETER_LIST in gfxPlatform.cpp.
/// - React to the parameter change anywhere in WebRender where a SetParam message is received.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Parameter {
Bool(BoolParameter, bool),
Int(IntParameter, i32),
}
/// Boolean configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum BoolParameter {
PboUploads = 0,
Multithreading = 1,
BatchedUploads = 2,
DrawCallsForTextureCopy = 3,
}
/// Integer configuration option.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum IntParameter {
BatchedUploadThreshold = 0,
}
bitflags! {
/// Flags to track why we are rendering.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct RenderReasons: u32 {
/// Equivalent of empty() for the C++ side.
const NONE = 0;
const SCENE = 1 << 0;
const ANIMATED_PROPERTY = 1 << 1;
const RESOURCE_UPDATE = 1 << 2;
const ASYNC_IMAGE = 1 << 3;
const CLEAR_RESOURCES = 1 << 4;
const APZ = 1 << 5;
/// Window resize
const RESIZE = 1 << 6;
/// Various widget-related reasons
const WIDGET = 1 << 7;
/// See Frame::must_be_drawn
const TEXTURE_CACHE_FLUSH = 1 << 8;
const SNAPSHOT = 1 << 9;
const POST_RESOURCE_UPDATES_HOOK = 1 << 10;
const CONFIG_CHANGE = 1 << 11;
const CONTENT_SYNC = 1 << 12;
const FLUSH = 1 << 13;
const TESTING = 1 << 14;
const OTHER = 1 << 15;
/// Vsync isn't actually "why" we render but it can be useful
/// to see which frames were driven by the vsync scheduler so
/// we store a bit for it.
const VSYNC = 1 << 16;
}
}
impl RenderReasons {
pub const NUM_BITS: u32 = 17;
}
bitflags! {
/// Flags to enable/disable various builtin debugging tools.
#[repr(C)]
#[derive(Default, Deserialize, MallocSizeOf, Serialize)]
pub struct DebugFlags: u32 {
/// Display the frame profiler on screen.
const PROFILER_DBG = 1 << 0;
/// Display intermediate render targets on screen.
const RENDER_TARGET_DBG = 1 << 1;
/// Display all texture cache pages on screen.
const TEXTURE_CACHE_DBG = 1 << 2;
/// Display GPU timing results.
const GPU_TIME_QUERIES = 1 << 3;
/// Query the number of pixels that pass the depth test divided and show it
/// in the profiler as a percentage of the number of pixels in the screen
/// (window width times height).
const GPU_SAMPLE_QUERIES = 1 << 4;
/// Render each quad with their own draw call.
///
/// Terrible for performance but can help with understanding the drawing
/// order when inspecting renderdoc or apitrace recordings.
const DISABLE_BATCHING = 1 << 5;
/// Display the pipeline epochs.
const EPOCHS = 1 << 6;
/// Print driver messages to stdout.
const ECHO_DRIVER_MESSAGES = 1 << 7;
/// Show an overlay displaying overdraw amount.
const SHOW_OVERDRAW = 1 << 8;
/// Display the contents of GPU cache.
const GPU_CACHE_DBG = 1 << 9;
/// Clear evicted parts of the texture cache for debugging purposes.
const TEXTURE_CACHE_DBG_CLEAR_EVICTED = 1 << 10;
/// Show picture caching debug overlay
const PICTURE_CACHING_DBG = 1 << 11;
/// Highlight all primitives with colors based on kind.
const PRIMITIVE_DBG = 1 << 12;
/// Draw a zoom widget showing part of the framebuffer zoomed in.
const ZOOM_DBG = 1 << 13;
/// Scale the debug renderer down for a smaller screen. This will disrupt
/// any mapping between debug display items and page content, so shouldn't
/// be used with overlays like the picture caching or primitive display.
const SMALL_SCREEN = 1 << 14;
/// Disable various bits of the WebRender pipeline, to help narrow
/// down where slowness might be coming from.
const DISABLE_OPAQUE_PASS = 1 << 15;
///
const DISABLE_ALPHA_PASS = 1 << 16;
///
const DISABLE_CLIP_MASKS = 1 << 17;
///
const DISABLE_TEXT_PRIMS = 1 << 18;
///
const DISABLE_GRADIENT_PRIMS = 1 << 19;
///
const OBSCURE_IMAGES = 1 << 20;
/// Taint the transparent area of the glyphs with a random opacity to easily
/// see when glyphs are re-rasterized.
const GLYPH_FLASHING = 1 << 21;
/// The profiler only displays information that is out of the ordinary.
const SMART_PROFILER = 1 << 22;
/// If set, dump picture cache invalidation debug to console.
const INVALIDATION_DBG = 1 << 23;
/// Collect and dump profiler statistics to captures.
const PROFILER_CAPTURE = (1 as u32) << 25; // need "as u32" until we have cbindgen#556
/// Invalidate picture tiles every frames (useful when inspecting GPU work in external tools).
const FORCE_PICTURE_INVALIDATION = (1 as u32) << 26;
/// Display window visibility on screen.
const WINDOW_VISIBILITY_DBG = 1 << 27;
}
}
/// Information specific to a primitive type that
/// uniquely identifies a primitive template by key.
#[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash, Serialize, Deserialize)]
pub enum PrimitiveKeyKind {
/// Clear an existing rect, used for special effects on some platforms.
Clear,
///
Rectangle {
///
color: PropertyBinding<ColorU>,
},
}
///
#[derive(Clone, Copy, Debug)]
pub enum ScrollLocation {
/// Scroll by a certain amount.
Delta(LayoutVector2D),
/// Scroll to very top of element.
Start,
/// Scroll to very bottom of element.
End,
}
/// Crash annotations included in crash reports.
#[repr(C)]
#[derive(Clone, Copy)]
pub enum CrashAnnotation {
CompileShader = 0,
DrawShader = 1,
}
/// Handler to expose support for annotating crash reports.
pub trait CrashAnnotator : Send {
fn set(&self, annotation: CrashAnnotation, value: &std::ffi::CStr);
fn clear(&self, annotation: CrashAnnotation);
fn box_clone(&self) -> Box<dyn CrashAnnotator>;
}
impl Clone for Box<dyn CrashAnnotator> {
fn clone(&self) -> Box<dyn CrashAnnotator> {
self.box_clone()
}
}
/// Guard to add a crash annotation at creation, and clear it at destruction.
pub struct CrashAnnotatorGuard<'a> {
annotator: &'a Option<Box<dyn CrashAnnotator>>,
annotation: CrashAnnotation,
}
impl<'a> CrashAnnotatorGuard<'a> {
pub fn new(
annotator: &'a Option<Box<dyn CrashAnnotator>>,
annotation: CrashAnnotation,
value: &std::ffi::CStr,
) -> Self {
if let Some(ref annotator) = annotator {
annotator.set(annotation, value);
}
Self {
annotator,
annotation,
}
}
}
impl<'a> Drop for CrashAnnotatorGuard<'a> {
fn drop(&mut self) {
if let Some(ref annotator) = self.annotator {
annotator.clear(self.annotation);
}
}
} | extern crate malloc_size_of;
extern crate peek_poke;
pub mod channel;
mod color; | random_line_split |
mod.rs | /*
(c) 2014 by Jeffrey Quesnelle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
pub struct Cpu { | pub a: u8,
pub x: u8,
pub y: u8,
pub p: u8
} | pub pc: u16,
pub sp: u8, | random_line_split |
mod.rs | /*
(c) 2014 by Jeffrey Quesnelle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
pub struct | {
pub pc: u16,
pub sp: u8,
pub a: u8,
pub x: u8,
pub y: u8,
pub p: u8
}
| Cpu | identifier_name |
errors1.rs | // errors1.rs
// This function refuses to generate text to be printed on a nametag if
// you pass it an empty string. It'd be nicer if it explained what the problem
// was, instead of just sometimes returning `None`. The 2nd test currently
// does not compile or pass, but it illustrates the behavior we would like
// this function to have.
// Execute `rustlings hint errors1` for hints!
// I AM NOT DONE
pub fn generate_nametag_text(name: String) -> Option<String> {
if name.len() > 0 {
Some(format!("Hi! My name is {}", name))
} else |
}
#[cfg(test)]
mod tests {
use super::*;
// This test passes initially if you comment out the 2nd test.
// You'll need to update what this test expects when you change
// the function under test!
#[test]
fn generates_nametag_text_for_a_nonempty_name() {
assert_eq!(
generate_nametag_text("Beyoncé".into()),
Some("Hi! My name is Beyoncé".into())
);
}
#[test]
fn explains_why_generating_nametag_text_fails() {
assert_eq!(
generate_nametag_text("".into()),
Err("`name` was empty; it must be nonempty.".into())
);
}
}
| {
// Empty names aren't allowed.
None
} | conditional_block |
errors1.rs | // errors1.rs
// This function refuses to generate text to be printed on a nametag if
// you pass it an empty string. It'd be nicer if it explained what the problem
// was, instead of just sometimes returning `None`. The 2nd test currently
// does not compile or pass, but it illustrates the behavior we would like
// this function to have.
// Execute `rustlings hint errors1` for hints!
// I AM NOT DONE
pub fn generate_nametag_text(name: String) -> Option<String> |
#[cfg(test)]
mod tests {
use super::*;
// This test passes initially if you comment out the 2nd test.
// You'll need to update what this test expects when you change
// the function under test!
#[test]
fn generates_nametag_text_for_a_nonempty_name() {
assert_eq!(
generate_nametag_text("Beyoncé".into()),
Some("Hi! My name is Beyoncé".into())
);
}
#[test]
fn explains_why_generating_nametag_text_fails() {
assert_eq!(
generate_nametag_text("".into()),
Err("`name` was empty; it must be nonempty.".into())
);
}
}
| {
if name.len() > 0 {
Some(format!("Hi! My name is {}", name))
} else {
// Empty names aren't allowed.
None
}
} | identifier_body |
errors1.rs | // errors1.rs
// This function refuses to generate text to be printed on a nametag if
// you pass it an empty string. It'd be nicer if it explained what the problem
// was, instead of just sometimes returning `None`. The 2nd test currently
// does not compile or pass, but it illustrates the behavior we would like
// this function to have.
// Execute `rustlings hint errors1` for hints!
// I AM NOT DONE
pub fn generate_nametag_text(name: String) -> Option<String> {
if name.len() > 0 {
Some(format!("Hi! My name is {}", name))
} else {
// Empty names aren't allowed.
None
}
}
#[cfg(test)]
mod tests {
use super::*;
// This test passes initially if you comment out the 2nd test.
// You'll need to update what this test expects when you change
// the function under test!
#[test]
fn generates_nametag_text_for_a_nonempty_name() {
assert_eq!(
generate_nametag_text("Beyoncé".into()),
Some("Hi! My name is Beyoncé".into())
);
}
#[test] | );
}
} | fn explains_why_generating_nametag_text_fails() {
assert_eq!(
generate_nametag_text("".into()),
Err("`name` was empty; it must be nonempty.".into()) | random_line_split |
errors1.rs | // errors1.rs
// This function refuses to generate text to be printed on a nametag if
// you pass it an empty string. It'd be nicer if it explained what the problem
// was, instead of just sometimes returning `None`. The 2nd test currently
// does not compile or pass, but it illustrates the behavior we would like
// this function to have.
// Execute `rustlings hint errors1` for hints!
// I AM NOT DONE
pub fn | (name: String) -> Option<String> {
if name.len() > 0 {
Some(format!("Hi! My name is {}", name))
} else {
// Empty names aren't allowed.
None
}
}
#[cfg(test)]
mod tests {
use super::*;
// This test passes initially if you comment out the 2nd test.
// You'll need to update what this test expects when you change
// the function under test!
#[test]
fn generates_nametag_text_for_a_nonempty_name() {
assert_eq!(
generate_nametag_text("Beyoncé".into()),
Some("Hi! My name is Beyoncé".into())
);
}
#[test]
fn explains_why_generating_nametag_text_fails() {
assert_eq!(
generate_nametag_text("".into()),
Err("`name` was empty; it must be nonempty.".into())
);
}
}
| generate_nametag_text | identifier_name |
xrview.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRViewBinding;
use crate::dom::bindings::codegen::Bindings::XRViewBinding::{XREye, XRViewMethods};
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::vrframedata::create_typed_array;
use crate::dom::xrsession::XRSession;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext, JSObject};
use std::ptr::NonNull;
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRView {
reflector_: Reflector,
session: Dom<XRSession>,
eye: XREye,
proj: Heap<*mut JSObject>,
view: Heap<*mut JSObject>,
}
impl XRView {
fn new_inherited(session: &XRSession, eye: XREye) -> XRView {
XRView {
reflector_: Reflector::new(),
session: Dom::from_ref(session),
eye,
proj: Heap::default(),
view: Heap::default(),
}
}
#[allow(unsafe_code)]
pub fn new(
global: &GlobalScope,
session: &XRSession,
eye: XREye,
data: &WebVRFrameData,
) -> DomRoot<XRView> {
let ret = reflect_dom_object(
Box::new(XRView::new_inherited(session, eye)),
global,
XRViewBinding::Wrap,
);
let (proj, view) = if eye == XREye::Left {
(&data.left_projection_matrix, &data.left_view_matrix)
} else {
(&data.right_projection_matrix, &data.right_view_matrix)
};
let cx = global.get_cx();
unsafe {
create_typed_array(cx, proj, &ret.proj);
create_typed_array(cx, view, &ret.view);
}
ret
}
pub fn session(&self) -> &XRSession {
&self.session
} | /// https://immersive-web.github.io/webxr/#dom-xrview-eye
fn Eye(&self) -> XREye {
self.eye
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ProjectionMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.proj.get()).unwrap()
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ViewMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.view.get()).unwrap()
}
} | }
impl XRViewMethods for XRView { | random_line_split |
xrview.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRViewBinding;
use crate::dom::bindings::codegen::Bindings::XRViewBinding::{XREye, XRViewMethods};
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::vrframedata::create_typed_array;
use crate::dom::xrsession::XRSession;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext, JSObject};
use std::ptr::NonNull;
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRView {
reflector_: Reflector,
session: Dom<XRSession>,
eye: XREye,
proj: Heap<*mut JSObject>,
view: Heap<*mut JSObject>,
}
impl XRView {
fn new_inherited(session: &XRSession, eye: XREye) -> XRView {
XRView {
reflector_: Reflector::new(),
session: Dom::from_ref(session),
eye,
proj: Heap::default(),
view: Heap::default(),
}
}
#[allow(unsafe_code)]
pub fn new(
global: &GlobalScope,
session: &XRSession,
eye: XREye,
data: &WebVRFrameData,
) -> DomRoot<XRView> {
let ret = reflect_dom_object(
Box::new(XRView::new_inherited(session, eye)),
global,
XRViewBinding::Wrap,
);
let (proj, view) = if eye == XREye::Left | else {
(&data.right_projection_matrix, &data.right_view_matrix)
};
let cx = global.get_cx();
unsafe {
create_typed_array(cx, proj, &ret.proj);
create_typed_array(cx, view, &ret.view);
}
ret
}
pub fn session(&self) -> &XRSession {
&self.session
}
}
impl XRViewMethods for XRView {
/// https://immersive-web.github.io/webxr/#dom-xrview-eye
fn Eye(&self) -> XREye {
self.eye
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ProjectionMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.proj.get()).unwrap()
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ViewMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.view.get()).unwrap()
}
}
| {
(&data.left_projection_matrix, &data.left_view_matrix)
} | conditional_block |
xrview.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRViewBinding;
use crate::dom::bindings::codegen::Bindings::XRViewBinding::{XREye, XRViewMethods};
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::vrframedata::create_typed_array;
use crate::dom::xrsession::XRSession;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext, JSObject};
use std::ptr::NonNull;
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRView {
reflector_: Reflector,
session: Dom<XRSession>,
eye: XREye,
proj: Heap<*mut JSObject>,
view: Heap<*mut JSObject>,
}
impl XRView {
fn | (session: &XRSession, eye: XREye) -> XRView {
XRView {
reflector_: Reflector::new(),
session: Dom::from_ref(session),
eye,
proj: Heap::default(),
view: Heap::default(),
}
}
#[allow(unsafe_code)]
pub fn new(
global: &GlobalScope,
session: &XRSession,
eye: XREye,
data: &WebVRFrameData,
) -> DomRoot<XRView> {
let ret = reflect_dom_object(
Box::new(XRView::new_inherited(session, eye)),
global,
XRViewBinding::Wrap,
);
let (proj, view) = if eye == XREye::Left {
(&data.left_projection_matrix, &data.left_view_matrix)
} else {
(&data.right_projection_matrix, &data.right_view_matrix)
};
let cx = global.get_cx();
unsafe {
create_typed_array(cx, proj, &ret.proj);
create_typed_array(cx, view, &ret.view);
}
ret
}
pub fn session(&self) -> &XRSession {
&self.session
}
}
impl XRViewMethods for XRView {
/// https://immersive-web.github.io/webxr/#dom-xrview-eye
fn Eye(&self) -> XREye {
self.eye
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ProjectionMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.proj.get()).unwrap()
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ViewMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.view.get()).unwrap()
}
}
| new_inherited | identifier_name |
xrview.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRViewBinding;
use crate::dom::bindings::codegen::Bindings::XRViewBinding::{XREye, XRViewMethods};
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::vrframedata::create_typed_array;
use crate::dom::xrsession::XRSession;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext, JSObject};
use std::ptr::NonNull;
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRView {
reflector_: Reflector,
session: Dom<XRSession>,
eye: XREye,
proj: Heap<*mut JSObject>,
view: Heap<*mut JSObject>,
}
impl XRView {
fn new_inherited(session: &XRSession, eye: XREye) -> XRView {
XRView {
reflector_: Reflector::new(),
session: Dom::from_ref(session),
eye,
proj: Heap::default(),
view: Heap::default(),
}
}
#[allow(unsafe_code)]
pub fn new(
global: &GlobalScope,
session: &XRSession,
eye: XREye,
data: &WebVRFrameData,
) -> DomRoot<XRView> {
let ret = reflect_dom_object(
Box::new(XRView::new_inherited(session, eye)),
global,
XRViewBinding::Wrap,
);
let (proj, view) = if eye == XREye::Left {
(&data.left_projection_matrix, &data.left_view_matrix)
} else {
(&data.right_projection_matrix, &data.right_view_matrix)
};
let cx = global.get_cx();
unsafe {
create_typed_array(cx, proj, &ret.proj);
create_typed_array(cx, view, &ret.view);
}
ret
}
pub fn session(&self) -> &XRSession {
&self.session
}
}
impl XRViewMethods for XRView {
/// https://immersive-web.github.io/webxr/#dom-xrview-eye
fn Eye(&self) -> XREye {
self.eye
}
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ProjectionMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> |
#[allow(unsafe_code)]
/// https://immersive-web.github.io/webxr/#dom-xrview-projectionmatrix
unsafe fn ViewMatrix(&self, _cx: *mut JSContext) -> NonNull<JSObject> {
NonNull::new(self.view.get()).unwrap()
}
}
| {
NonNull::new(self.proj.get()).unwrap()
} | identifier_body |
lib.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use diem_logger::debug;
use std::{cmp::min, future::Future, pin::Pin, thread, time::Duration};
/// Given an operation retries it successfully sleeping everytime it fails
/// If the operation succeeds before the iterator runs out, it returns success
pub fn retry<I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Result<T, E>,
{
let mut iterator = iterable.into_iter();
loop {
match operation() {
Ok(value) => return Ok(value),
Err(err) => {
if let Some(delay) = iterator.next() {
thread::sleep(delay);
} else {
return Err(err);
}
}
}
}
}
pub async fn retry_async<'a, I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>,
E: std::fmt::Display + std::fmt::Debug,
{
let mut iterator = iterable.into_iter();
loop {
match operation().await {
Ok(value) => return Ok(value),
Err(err) => {
if let Some(delay) = iterator.next() {
debug!("{}. Retrying in {} seconds..", err, delay.as_secs());
tokio::time::sleep(delay).await;
} else {
return Err(err);
}
}
}
}
}
pub fn fixed_retry_strategy(delay_ms: u64, tries: usize) -> impl Iterator<Item = Duration> {
FixedDelay::new(delay_ms).take(tries)
}
pub fn exp_retry_strategy(
start_ms: u64,
limit_ms: u64,
tries: usize,
) -> impl Iterator<Item = Duration> {
ExponentWithLimitDelay::new(start_ms, limit_ms).take(tries)
}
/// An iterator which uses a fixed delay
pub struct FixedDelay {
duration: Duration,
}
pub struct ExponentWithLimitDelay {
current: Duration,
limit: Duration,
exp: f64,
}
impl FixedDelay {
/// Create a new `FixedDelay` using the given duration in milliseconds.
fn new(millis: u64) -> Self {
FixedDelay {
duration: Duration::from_millis(millis),
}
}
}
impl ExponentWithLimitDelay {
fn new(start_ms: u64, limit_ms: u64) -> Self {
ExponentWithLimitDelay {
current: Duration::from_millis(start_ms),
limit: Duration::from_millis(limit_ms),
exp: 1.5,
}
}
}
impl Iterator for FixedDelay {
type Item = Duration;
fn | (&mut self) -> Option<Duration> {
Some(self.duration)
}
}
impl Iterator for ExponentWithLimitDelay {
type Item = Duration;
fn next(&mut self) -> Option<Duration> {
let duration = self.current;
self.current = min(
Duration::from_millis((self.current.as_millis() as f64 * self.exp) as u64),
self.limit,
);
Some(duration)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fixed_retry_strategy_success() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 10), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
})
.unwrap();
assert_eq!(result, 5);
}
#[test]
fn test_fixed_retry_strategy_error() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 3), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
});
assert_eq!(result, Err("not 5"));
}
}
| next | identifier_name |
lib.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use diem_logger::debug;
use std::{cmp::min, future::Future, pin::Pin, thread, time::Duration};
/// Given an operation retries it successfully sleeping everytime it fails
/// If the operation succeeds before the iterator runs out, it returns success
pub fn retry<I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Result<T, E>,
{
let mut iterator = iterable.into_iter();
loop {
match operation() {
Ok(value) => return Ok(value),
Err(err) => {
if let Some(delay) = iterator.next() {
thread::sleep(delay);
} else {
return Err(err);
}
}
}
}
}
pub async fn retry_async<'a, I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>,
E: std::fmt::Display + std::fmt::Debug,
{
let mut iterator = iterable.into_iter();
loop {
match operation().await {
Ok(value) => return Ok(value),
Err(err) => {
if let Some(delay) = iterator.next() {
debug!("{}. Retrying in {} seconds..", err, delay.as_secs());
tokio::time::sleep(delay).await;
} else {
return Err(err);
}
}
}
}
}
pub fn fixed_retry_strategy(delay_ms: u64, tries: usize) -> impl Iterator<Item = Duration> {
FixedDelay::new(delay_ms).take(tries)
}
pub fn exp_retry_strategy(
start_ms: u64,
limit_ms: u64,
tries: usize,
) -> impl Iterator<Item = Duration> {
ExponentWithLimitDelay::new(start_ms, limit_ms).take(tries)
}
/// An iterator which uses a fixed delay
pub struct FixedDelay {
duration: Duration,
}
pub struct ExponentWithLimitDelay {
current: Duration,
limit: Duration,
exp: f64,
}
impl FixedDelay {
/// Create a new `FixedDelay` using the given duration in milliseconds.
fn new(millis: u64) -> Self {
FixedDelay {
duration: Duration::from_millis(millis),
}
}
}
impl ExponentWithLimitDelay {
fn new(start_ms: u64, limit_ms: u64) -> Self {
ExponentWithLimitDelay {
current: Duration::from_millis(start_ms),
limit: Duration::from_millis(limit_ms),
exp: 1.5,
}
}
}
impl Iterator for FixedDelay {
type Item = Duration;
fn next(&mut self) -> Option<Duration> {
Some(self.duration)
}
}
impl Iterator for ExponentWithLimitDelay {
type Item = Duration;
fn next(&mut self) -> Option<Duration> |
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fixed_retry_strategy_success() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 10), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
})
.unwrap();
assert_eq!(result, 5);
}
#[test]
fn test_fixed_retry_strategy_error() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 3), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
});
assert_eq!(result, Err("not 5"));
}
}
| {
let duration = self.current;
self.current = min(
Duration::from_millis((self.current.as_millis() as f64 * self.exp) as u64),
self.limit,
);
Some(duration)
} | identifier_body |
lib.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use diem_logger::debug;
use std::{cmp::min, future::Future, pin::Pin, thread, time::Duration};
/// Given an operation retries it successfully sleeping everytime it fails
/// If the operation succeeds before the iterator runs out, it returns success
pub fn retry<I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Result<T, E>,
{
let mut iterator = iterable.into_iter();
loop {
match operation() {
Ok(value) => return Ok(value),
Err(err) => {
if let Some(delay) = iterator.next() {
thread::sleep(delay);
} else {
return Err(err);
}
}
}
}
}
pub async fn retry_async<'a, I, O, T, E>(iterable: I, mut operation: O) -> Result<T, E>
where
I: IntoIterator<Item = Duration>,
O: FnMut() -> Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>,
E: std::fmt::Display + std::fmt::Debug,
{
let mut iterator = iterable.into_iter();
loop {
match operation().await {
Ok(value) => return Ok(value),
Err(err) => { | } else {
return Err(err);
}
}
}
}
}
pub fn fixed_retry_strategy(delay_ms: u64, tries: usize) -> impl Iterator<Item = Duration> {
FixedDelay::new(delay_ms).take(tries)
}
pub fn exp_retry_strategy(
start_ms: u64,
limit_ms: u64,
tries: usize,
) -> impl Iterator<Item = Duration> {
ExponentWithLimitDelay::new(start_ms, limit_ms).take(tries)
}
/// An iterator which uses a fixed delay
pub struct FixedDelay {
duration: Duration,
}
pub struct ExponentWithLimitDelay {
current: Duration,
limit: Duration,
exp: f64,
}
impl FixedDelay {
/// Create a new `FixedDelay` using the given duration in milliseconds.
fn new(millis: u64) -> Self {
FixedDelay {
duration: Duration::from_millis(millis),
}
}
}
impl ExponentWithLimitDelay {
fn new(start_ms: u64, limit_ms: u64) -> Self {
ExponentWithLimitDelay {
current: Duration::from_millis(start_ms),
limit: Duration::from_millis(limit_ms),
exp: 1.5,
}
}
}
impl Iterator for FixedDelay {
type Item = Duration;
fn next(&mut self) -> Option<Duration> {
Some(self.duration)
}
}
impl Iterator for ExponentWithLimitDelay {
type Item = Duration;
fn next(&mut self) -> Option<Duration> {
let duration = self.current;
self.current = min(
Duration::from_millis((self.current.as_millis() as f64 * self.exp) as u64),
self.limit,
);
Some(duration)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fixed_retry_strategy_success() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 10), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
})
.unwrap();
assert_eq!(result, 5);
}
#[test]
fn test_fixed_retry_strategy_error() {
let mut collection = vec![1, 2, 3, 4, 5].into_iter();
let result = retry(fixed_retry_strategy(0, 3), || match collection.next() {
Some(n) if n == 5 => Ok(n),
Some(_) => Err("not 5"),
None => Err("not 5"),
});
assert_eq!(result, Err("not 5"));
}
} | if let Some(delay) = iterator.next() {
debug!("{}. Retrying in {} seconds..", err, delay.as_secs());
tokio::time::sleep(delay).await; | random_line_split |
rustiny-rulecomp.rs | //! TODO: Docs
#![feature(plugin)]
extern crate clap;
extern crate env_logger;
extern crate rustiny;
use clap::{Arg, App};
use rustiny::util::{read_file, write_file};
#[cfg(not(test))]
fn | () {
env_logger::init().unwrap();
// Parse arguments
let app = App::new("rustiny-rulecomp")
.version(env!("CARGO_PKG_VERSION"))
.author("Markus SIemens <[email protected]>")
.arg(Arg::with_name("output")
.short("o")
.value_name("OUTPUT")
.help("Sets the output file"))
.arg(Arg::with_name("INPUT")
.help("Sets the file to compile")
.required(true)
.index(1));
let args = app.get_matches();
// Read source file
let input_file = args.value_of("INPUT").unwrap();
let source = read_file(input_file);
// Compile rules
let rules = rustiny::back::compile_rules(&source, input_file);
if let Some(output_file) = args.value_of("output") {
write_file(output_file, &rules);
} else {
println!("{}", &rules)
}
} | main | identifier_name |
rustiny-rulecomp.rs | //! TODO: Docs
#![feature(plugin)]
extern crate clap;
extern crate env_logger;
extern crate rustiny;
use clap::{Arg, App};
use rustiny::util::{read_file, write_file};
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
// Parse arguments
let app = App::new("rustiny-rulecomp")
.version(env!("CARGO_PKG_VERSION"))
.author("Markus SIemens <[email protected]>")
.arg(Arg::with_name("output")
.short("o")
.value_name("OUTPUT")
.help("Sets the output file"))
.arg(Arg::with_name("INPUT")
.help("Sets the file to compile") |
// Read source file
let input_file = args.value_of("INPUT").unwrap();
let source = read_file(input_file);
// Compile rules
let rules = rustiny::back::compile_rules(&source, input_file);
if let Some(output_file) = args.value_of("output") {
write_file(output_file, &rules);
} else {
println!("{}", &rules)
}
} | .required(true)
.index(1));
let args = app.get_matches(); | random_line_split |
rustiny-rulecomp.rs | //! TODO: Docs
#![feature(plugin)]
extern crate clap;
extern crate env_logger;
extern crate rustiny;
use clap::{Arg, App};
use rustiny::util::{read_file, write_file};
#[cfg(not(test))]
fn main() | let input_file = args.value_of("INPUT").unwrap();
let source = read_file(input_file);
// Compile rules
let rules = rustiny::back::compile_rules(&source, input_file);
if let Some(output_file) = args.value_of("output") {
write_file(output_file, &rules);
} else {
println!("{}", &rules)
}
} | {
env_logger::init().unwrap();
// Parse arguments
let app = App::new("rustiny-rulecomp")
.version(env!("CARGO_PKG_VERSION"))
.author("Markus SIemens <[email protected]>")
.arg(Arg::with_name("output")
.short("o")
.value_name("OUTPUT")
.help("Sets the output file"))
.arg(Arg::with_name("INPUT")
.help("Sets the file to compile")
.required(true)
.index(1));
let args = app.get_matches();
// Read source file | identifier_body |
rustiny-rulecomp.rs | //! TODO: Docs
#![feature(plugin)]
extern crate clap;
extern crate env_logger;
extern crate rustiny;
use clap::{Arg, App};
use rustiny::util::{read_file, write_file};
#[cfg(not(test))]
fn main() {
env_logger::init().unwrap();
// Parse arguments
let app = App::new("rustiny-rulecomp")
.version(env!("CARGO_PKG_VERSION"))
.author("Markus SIemens <[email protected]>")
.arg(Arg::with_name("output")
.short("o")
.value_name("OUTPUT")
.help("Sets the output file"))
.arg(Arg::with_name("INPUT")
.help("Sets the file to compile")
.required(true)
.index(1));
let args = app.get_matches();
// Read source file
let input_file = args.value_of("INPUT").unwrap();
let source = read_file(input_file);
// Compile rules
let rules = rustiny::back::compile_rules(&source, input_file);
if let Some(output_file) = args.value_of("output") | else {
println!("{}", &rules)
}
} | {
write_file(output_file, &rules);
} | conditional_block |
floatf.rs | //! formatter for %f %F common-notation floating-point subs
use super::super::format_field::FormatField;
use super::super::formatter::{FormatPrimitive, Formatter, InPrefix};
use super::float_common::{get_primitive_dec, primitive_to_str_common, FloatAnalysis};
pub struct | {
as_num: f64,
}
impl Floatf {
pub fn new() -> Floatf {
Floatf { as_num: 0.0 }
}
}
impl Formatter for Floatf {
fn get_primitive(
&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str,
) -> Option<FormatPrimitive> {
let second_field = field.second_field.unwrap_or(6) + 1;
let analysis =
FloatAnalysis::analyze(&str_in, inprefix, None, Some(second_field as usize), false);
let f = get_primitive_dec(
inprefix,
&str_in[inprefix.offset..],
&analysis,
second_field as usize,
None,
);
Some(f)
}
fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String {
primitive_to_str_common(prim, &field)
}
}
| Floatf | identifier_name |
floatf.rs | //! formatter for %f %F common-notation floating-point subs
use super::super::format_field::FormatField;
use super::super::formatter::{FormatPrimitive, Formatter, InPrefix};
use super::float_common::{get_primitive_dec, primitive_to_str_common, FloatAnalysis};
pub struct Floatf {
as_num: f64,
}
impl Floatf {
pub fn new() -> Floatf {
Floatf { as_num: 0.0 }
}
}
impl Formatter for Floatf {
fn get_primitive(
&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str,
) -> Option<FormatPrimitive> {
let second_field = field.second_field.unwrap_or(6) + 1;
let analysis =
FloatAnalysis::analyze(&str_in, inprefix, None, Some(second_field as usize), false);
let f = get_primitive_dec(
inprefix,
&str_in[inprefix.offset..],
&analysis,
second_field as usize,
None,
); | } | Some(f)
}
fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String {
primitive_to_str_common(prim, &field)
} | random_line_split |
path.rs | use crate::factory::IFactory;
use crate::geometry::IGeometry;
use crate::resource::IResource;
use com_wrapper::ComWrapper;
use dcommon::Error;
use winapi::shared::winerror::SUCCEEDED;
use winapi::um::d2d1::{ID2D1Geometry, ID2D1PathGeometry, ID2D1Resource};
use wio::com::ComPtr;
pub use self::builder::*;
pub mod builder;
#[repr(transparent)]
#[derive(ComWrapper, Clone)]
#[com(send, sync, debug)]
/// Custom-shaped geometry made of lines and curves
pub struct PathGeometry {
ptr: ComPtr<ID2D1PathGeometry>,
}
impl PathGeometry {
pub fn create(factory: &dyn IFactory) -> Result<PathBuilder, Error> {
unsafe {
let mut ptr = std::ptr::null_mut();
let hr = factory.raw_f().CreatePathGeometry(&mut ptr);
if SUCCEEDED(hr) {
let path = PathGeometry::from_raw(ptr);
let mut ptr = std::ptr::null_mut();
let hr = path.ptr.Open(&mut ptr);
if SUCCEEDED(hr) {
Ok(PathBuilder {
path: path,
sink: ComPtr::from_raw(ptr),
})
} else {
Err(hr.into())
}
} else {
Err(hr.into())
}
}
}
pub fn segment_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetSegmentCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else {
Err(From::from(result))
}
}
}
pub fn figure_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetFigureCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else {
Err(From::from(result))
}
}
}
}
unsafe impl IResource for PathGeometry {
unsafe fn raw_resource(&self) -> &ID2D1Resource {
&self.ptr
}
}
unsafe impl IGeometry for PathGeometry {
unsafe fn | (&self) -> &ID2D1Geometry {
&self.ptr
}
}
unsafe impl super::GeometryType for PathGeometry {}
| raw_geom | identifier_name |
path.rs | use crate::factory::IFactory;
use crate::geometry::IGeometry;
use crate::resource::IResource;
use com_wrapper::ComWrapper;
use dcommon::Error;
use winapi::shared::winerror::SUCCEEDED;
use winapi::um::d2d1::{ID2D1Geometry, ID2D1PathGeometry, ID2D1Resource};
use wio::com::ComPtr;
pub use self::builder::*;
pub mod builder;
#[repr(transparent)]
#[derive(ComWrapper, Clone)]
#[com(send, sync, debug)]
/// Custom-shaped geometry made of lines and curves
pub struct PathGeometry {
ptr: ComPtr<ID2D1PathGeometry>,
}
impl PathGeometry {
pub fn create(factory: &dyn IFactory) -> Result<PathBuilder, Error> {
unsafe {
let mut ptr = std::ptr::null_mut();
let hr = factory.raw_f().CreatePathGeometry(&mut ptr);
if SUCCEEDED(hr) {
let path = PathGeometry::from_raw(ptr);
let mut ptr = std::ptr::null_mut();
let hr = path.ptr.Open(&mut ptr);
if SUCCEEDED(hr) {
Ok(PathBuilder {
path: path,
sink: ComPtr::from_raw(ptr),
})
} else {
Err(hr.into())
}
} else {
Err(hr.into())
}
}
}
pub fn segment_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetSegmentCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else {
Err(From::from(result))
}
}
}
pub fn figure_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetFigureCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else { | }
unsafe impl IResource for PathGeometry {
unsafe fn raw_resource(&self) -> &ID2D1Resource {
&self.ptr
}
}
unsafe impl IGeometry for PathGeometry {
unsafe fn raw_geom(&self) -> &ID2D1Geometry {
&self.ptr
}
}
unsafe impl super::GeometryType for PathGeometry {} | Err(From::from(result))
}
}
} | random_line_split |
path.rs | use crate::factory::IFactory;
use crate::geometry::IGeometry;
use crate::resource::IResource;
use com_wrapper::ComWrapper;
use dcommon::Error;
use winapi::shared::winerror::SUCCEEDED;
use winapi::um::d2d1::{ID2D1Geometry, ID2D1PathGeometry, ID2D1Resource};
use wio::com::ComPtr;
pub use self::builder::*;
pub mod builder;
#[repr(transparent)]
#[derive(ComWrapper, Clone)]
#[com(send, sync, debug)]
/// Custom-shaped geometry made of lines and curves
pub struct PathGeometry {
ptr: ComPtr<ID2D1PathGeometry>,
}
impl PathGeometry {
pub fn create(factory: &dyn IFactory) -> Result<PathBuilder, Error> {
unsafe {
let mut ptr = std::ptr::null_mut();
let hr = factory.raw_f().CreatePathGeometry(&mut ptr);
if SUCCEEDED(hr) {
let path = PathGeometry::from_raw(ptr);
let mut ptr = std::ptr::null_mut();
let hr = path.ptr.Open(&mut ptr);
if SUCCEEDED(hr) | else {
Err(hr.into())
}
} else {
Err(hr.into())
}
}
}
pub fn segment_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetSegmentCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else {
Err(From::from(result))
}
}
}
pub fn figure_count(&self) -> Result<u32, Error> {
unsafe {
let mut count = 0;
let result = self.ptr.GetFigureCount(&mut count);
if SUCCEEDED(result) {
Ok(count)
} else {
Err(From::from(result))
}
}
}
}
unsafe impl IResource for PathGeometry {
unsafe fn raw_resource(&self) -> &ID2D1Resource {
&self.ptr
}
}
unsafe impl IGeometry for PathGeometry {
unsafe fn raw_geom(&self) -> &ID2D1Geometry {
&self.ptr
}
}
unsafe impl super::GeometryType for PathGeometry {}
| {
Ok(PathBuilder {
path: path,
sink: ComPtr::from_raw(ptr),
})
} | conditional_block |
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Standalone server for socket_bench
//! ========================================
//!
//! You can run `socket_bench` across a real network by running this bench
//! server remotely. For example,
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo run --release -p socket-bench-server`
//!
//! will run the socket bench server handling the remote_tcp benchmark. A
//! corresponding client would exercise this benchmark using
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo x bench -p network remote_tcp`
use diem_logger::info;
use netcore::transport::tcp::TcpTransport;
use socket_bench_server::{build_tcp_noise_transport, start_stream_server, Args};
use tokio::runtime::Builder;
fn main() {
::diem_logger::Logger::new().init();
let args = Args::from_env();
let rt = Builder::new_multi_thread()
.worker_threads(32)
.enable_all()
.build()
.unwrap();
let executor = rt.handle();
if let Some(addr) = args.tcp_addr {
let addr = start_stream_server(&executor, TcpTransport::default(), addr);
info!("bench: tcp: listening on: {}", addr);
}
if let Some(addr) = args.tcp_noise_addr |
std::thread::park();
}
| {
let addr = start_stream_server(&executor, build_tcp_noise_transport(), addr);
info!("bench: tcp+noise: listening on: {}", addr);
} | conditional_block |
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Standalone server for socket_bench
//! ========================================
//!
//! You can run `socket_bench` across a real network by running this bench
//! server remotely. For example,
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo run --release -p socket-bench-server`
//!
//! will run the socket bench server handling the remote_tcp benchmark. A
//! corresponding client would exercise this benchmark using
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo x bench -p network remote_tcp`
use diem_logger::info;
use netcore::transport::tcp::TcpTransport;
use socket_bench_server::{build_tcp_noise_transport, start_stream_server, Args};
use tokio::runtime::Builder;
fn main() | }
std::thread::park();
}
| {
::diem_logger::Logger::new().init();
let args = Args::from_env();
let rt = Builder::new_multi_thread()
.worker_threads(32)
.enable_all()
.build()
.unwrap();
let executor = rt.handle();
if let Some(addr) = args.tcp_addr {
let addr = start_stream_server(&executor, TcpTransport::default(), addr);
info!("bench: tcp: listening on: {}", addr);
}
if let Some(addr) = args.tcp_noise_addr {
let addr = start_stream_server(&executor, build_tcp_noise_transport(), addr);
info!("bench: tcp+noise: listening on: {}", addr); | identifier_body |
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Standalone server for socket_bench
//! ========================================
//!
//! You can run `socket_bench` across a real network by running this bench
//! server remotely. For example,
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo run --release -p socket-bench-server`
//!
//! will run the socket bench server handling the remote_tcp benchmark. A
//! corresponding client would exercise this benchmark using
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo x bench -p network remote_tcp`
use diem_logger::info;
use netcore::transport::tcp::TcpTransport;
use socket_bench_server::{build_tcp_noise_transport, start_stream_server, Args};
use tokio::runtime::Builder;
fn main() {
::diem_logger::Logger::new().init();
let args = Args::from_env();
let rt = Builder::new_multi_thread()
.worker_threads(32)
.enable_all()
.build()
.unwrap();
let executor = rt.handle();
if let Some(addr) = args.tcp_addr {
let addr = start_stream_server(&executor, TcpTransport::default(), addr);
info!("bench: tcp: listening on: {}", addr);
}
if let Some(addr) = args.tcp_noise_addr {
let addr = start_stream_server(&executor, build_tcp_noise_transport(), addr);
info!("bench: tcp+noise: listening on: {}", addr);
}
std::thread::park(); | } | random_line_split |
|
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Standalone server for socket_bench
//! ========================================
//!
//! You can run `socket_bench` across a real network by running this bench
//! server remotely. For example,
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo run --release -p socket-bench-server`
//!
//! will run the socket bench server handling the remote_tcp benchmark. A
//! corresponding client would exercise this benchmark using
//!
//! `RUSTFLAGS="-Ctarget-cpu=skylake -Ctarget-feature=+aes,+sse2,+sse4.1,+ssse3" TCP_ADDR=/ip6/::1/tcp/12345 cargo x bench -p network remote_tcp`
use diem_logger::info;
use netcore::transport::tcp::TcpTransport;
use socket_bench_server::{build_tcp_noise_transport, start_stream_server, Args};
use tokio::runtime::Builder;
fn | () {
::diem_logger::Logger::new().init();
let args = Args::from_env();
let rt = Builder::new_multi_thread()
.worker_threads(32)
.enable_all()
.build()
.unwrap();
let executor = rt.handle();
if let Some(addr) = args.tcp_addr {
let addr = start_stream_server(&executor, TcpTransport::default(), addr);
info!("bench: tcp: listening on: {}", addr);
}
if let Some(addr) = args.tcp_noise_addr {
let addr = start_stream_server(&executor, build_tcp_noise_transport(), addr);
info!("bench: tcp+noise: listening on: {}", addr);
}
std::thread::park();
}
| main | identifier_name |
force_generator.rs | #![allow(missing_docs)] // for downcast.
use downcast_rs::Downcast;
use generational_arena::Arena;
use na::RealField;
use crate::object::{BodyHandle, BodySet, DefaultBodyHandle};
use crate::solver::IntegrationParameters;
/// Default force generator set based on an arena with generational indices.
pub type DefaultForceGeneratorSet<N: RealField, Handle: BodyHandle = DefaultBodyHandle> =
Arena<Box<dyn ForceGenerator<N, Handle>>>;
/// Trait implemented by sets of force generators.
///
/// A set of bodies maps a force generator handle to a force generator instance.
pub trait ForceGeneratorSet<N: RealField, Handle: BodyHandle> {
/// Type of a force generator stored in this set.
type ForceGenerator:?Sized + ForceGenerator<N, Handle>;
/// Type of a force generator handle identifying a force generator in this set.
type Handle: Copy;
/// Gets a reference to the force generator identified by `handle`.
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator>;
/// Gets a mutable reference to the force generator identified by `handle`.
fn get_mut(&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator>;
/// Check if this set contains a force generator identified by `handle`.
fn contains(&self, handle: Self::Handle) -> bool;
/// Iterate through all the force generators on this set, applying the closure `f` on them.
fn foreach(&self, f: impl FnMut(Self::Handle, &Self::ForceGenerator));
/// Mutable iterates through all the force generators on this set, applying the closure `f` on them.
fn foreach_mut(&mut self, f: impl FnMut(Self::Handle, &mut Self::ForceGenerator));
}
impl<N: RealField, Handle: BodyHandle> ForceGeneratorSet<N, Handle>
for DefaultForceGeneratorSet<N, Handle>
{
type ForceGenerator = dyn ForceGenerator<N, Handle>;
type Handle = DefaultForceGeneratorHandle; |
fn get_mut(&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator> {
self.get_mut(handle).map(|c| &mut **c)
}
fn contains(&self, handle: Self::Handle) -> bool {
self.contains(handle)
}
fn foreach(&self, mut f: impl FnMut(Self::Handle, &Self::ForceGenerator)) {
for (h, b) in self.iter() {
f(h, &**b)
}
}
fn foreach_mut(&mut self, mut f: impl FnMut(Self::Handle, &mut Self::ForceGenerator)) {
for (h, b) in self.iter_mut() {
f(h, &mut **b)
}
}
}
/// The handle of a force generator.
pub type DefaultForceGeneratorHandle = generational_arena::Index;
/// A persistent force generator.
///
/// A force generator applies a force to one or several bodies at each step of the simulation.
pub trait ForceGenerator<N: RealField, Handle: BodyHandle>: Downcast + Send + Sync {
/// Apply forces to some bodies.
fn apply(
&mut self,
parameters: &IntegrationParameters<N>,
bodies: &mut dyn BodySet<N, Handle = Handle>,
);
}
impl_downcast!(ForceGenerator<N, Handle> where N: RealField, Handle: BodyHandle); |
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator> {
self.get(handle).map(|c| &**c)
} | random_line_split |
force_generator.rs | #![allow(missing_docs)] // for downcast.
use downcast_rs::Downcast;
use generational_arena::Arena;
use na::RealField;
use crate::object::{BodyHandle, BodySet, DefaultBodyHandle};
use crate::solver::IntegrationParameters;
/// Default force generator set based on an arena with generational indices.
pub type DefaultForceGeneratorSet<N: RealField, Handle: BodyHandle = DefaultBodyHandle> =
Arena<Box<dyn ForceGenerator<N, Handle>>>;
/// Trait implemented by sets of force generators.
///
/// A set of bodies maps a force generator handle to a force generator instance.
pub trait ForceGeneratorSet<N: RealField, Handle: BodyHandle> {
/// Type of a force generator stored in this set.
type ForceGenerator:?Sized + ForceGenerator<N, Handle>;
/// Type of a force generator handle identifying a force generator in this set.
type Handle: Copy;
/// Gets a reference to the force generator identified by `handle`.
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator>;
/// Gets a mutable reference to the force generator identified by `handle`.
fn get_mut(&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator>;
/// Check if this set contains a force generator identified by `handle`.
fn contains(&self, handle: Self::Handle) -> bool;
/// Iterate through all the force generators on this set, applying the closure `f` on them.
fn foreach(&self, f: impl FnMut(Self::Handle, &Self::ForceGenerator));
/// Mutable iterates through all the force generators on this set, applying the closure `f` on them.
fn foreach_mut(&mut self, f: impl FnMut(Self::Handle, &mut Self::ForceGenerator));
}
impl<N: RealField, Handle: BodyHandle> ForceGeneratorSet<N, Handle>
for DefaultForceGeneratorSet<N, Handle>
{
type ForceGenerator = dyn ForceGenerator<N, Handle>;
type Handle = DefaultForceGeneratorHandle;
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator> |
fn get_mut(&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator> {
self.get_mut(handle).map(|c| &mut **c)
}
fn contains(&self, handle: Self::Handle) -> bool {
self.contains(handle)
}
fn foreach(&self, mut f: impl FnMut(Self::Handle, &Self::ForceGenerator)) {
for (h, b) in self.iter() {
f(h, &**b)
}
}
fn foreach_mut(&mut self, mut f: impl FnMut(Self::Handle, &mut Self::ForceGenerator)) {
for (h, b) in self.iter_mut() {
f(h, &mut **b)
}
}
}
/// The handle of a force generator.
pub type DefaultForceGeneratorHandle = generational_arena::Index;
/// A persistent force generator.
///
/// A force generator applies a force to one or several bodies at each step of the simulation.
pub trait ForceGenerator<N: RealField, Handle: BodyHandle>: Downcast + Send + Sync {
/// Apply forces to some bodies.
fn apply(
&mut self,
parameters: &IntegrationParameters<N>,
bodies: &mut dyn BodySet<N, Handle = Handle>,
);
}
impl_downcast!(ForceGenerator<N, Handle> where N: RealField, Handle: BodyHandle);
| {
self.get(handle).map(|c| &**c)
} | identifier_body |
force_generator.rs | #![allow(missing_docs)] // for downcast.
use downcast_rs::Downcast;
use generational_arena::Arena;
use na::RealField;
use crate::object::{BodyHandle, BodySet, DefaultBodyHandle};
use crate::solver::IntegrationParameters;
/// Default force generator set based on an arena with generational indices.
pub type DefaultForceGeneratorSet<N: RealField, Handle: BodyHandle = DefaultBodyHandle> =
Arena<Box<dyn ForceGenerator<N, Handle>>>;
/// Trait implemented by sets of force generators.
///
/// A set of bodies maps a force generator handle to a force generator instance.
pub trait ForceGeneratorSet<N: RealField, Handle: BodyHandle> {
/// Type of a force generator stored in this set.
type ForceGenerator:?Sized + ForceGenerator<N, Handle>;
/// Type of a force generator handle identifying a force generator in this set.
type Handle: Copy;
/// Gets a reference to the force generator identified by `handle`.
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator>;
/// Gets a mutable reference to the force generator identified by `handle`.
fn get_mut(&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator>;
/// Check if this set contains a force generator identified by `handle`.
fn contains(&self, handle: Self::Handle) -> bool;
/// Iterate through all the force generators on this set, applying the closure `f` on them.
fn foreach(&self, f: impl FnMut(Self::Handle, &Self::ForceGenerator));
/// Mutable iterates through all the force generators on this set, applying the closure `f` on them.
fn foreach_mut(&mut self, f: impl FnMut(Self::Handle, &mut Self::ForceGenerator));
}
impl<N: RealField, Handle: BodyHandle> ForceGeneratorSet<N, Handle>
for DefaultForceGeneratorSet<N, Handle>
{
type ForceGenerator = dyn ForceGenerator<N, Handle>;
type Handle = DefaultForceGeneratorHandle;
fn get(&self, handle: Self::Handle) -> Option<&Self::ForceGenerator> {
self.get(handle).map(|c| &**c)
}
fn | (&mut self, handle: Self::Handle) -> Option<&mut Self::ForceGenerator> {
self.get_mut(handle).map(|c| &mut **c)
}
fn contains(&self, handle: Self::Handle) -> bool {
self.contains(handle)
}
fn foreach(&self, mut f: impl FnMut(Self::Handle, &Self::ForceGenerator)) {
for (h, b) in self.iter() {
f(h, &**b)
}
}
fn foreach_mut(&mut self, mut f: impl FnMut(Self::Handle, &mut Self::ForceGenerator)) {
for (h, b) in self.iter_mut() {
f(h, &mut **b)
}
}
}
/// The handle of a force generator.
pub type DefaultForceGeneratorHandle = generational_arena::Index;
/// A persistent force generator.
///
/// A force generator applies a force to one or several bodies at each step of the simulation.
pub trait ForceGenerator<N: RealField, Handle: BodyHandle>: Downcast + Send + Sync {
/// Apply forces to some bodies.
fn apply(
&mut self,
parameters: &IntegrationParameters<N>,
bodies: &mut dyn BodySet<N, Handle = Handle>,
);
}
impl_downcast!(ForceGenerator<N, Handle> where N: RealField, Handle: BodyHandle);
| get_mut | identifier_name |
ray_bounding_sphere.rs | use crate::bounding_volume::BoundingSphere;
use crate::math::Isometry;
use crate::query::{Ray, RayCast, RayIntersection};
use crate::shape::Ball;
use na::RealField;
impl<N: RealField> RayCast<N> for BoundingSphere<N> {
#[inline]
fn toi_with_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N, solid: bool) -> Option<N> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_with_ray(m, ¢ered_ray, max_toi, solid)
}
#[inline]
fn toi_and_normal_with_ray(
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
}
#[cfg(feature = "dim3")]
#[inline]
fn | (
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_and_uv_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
}
#[inline]
fn intersects_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N) -> bool {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).intersects_ray(&Isometry::identity(), ¢ered_ray, max_toi)
}
}
| toi_and_normal_and_uv_with_ray | identifier_name |
ray_bounding_sphere.rs | use crate::bounding_volume::BoundingSphere;
use crate::math::Isometry;
use crate::query::{Ray, RayCast, RayIntersection};
use crate::shape::Ball;
use na::RealField;
impl<N: RealField> RayCast<N> for BoundingSphere<N> {
#[inline]
fn toi_with_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N, solid: bool) -> Option<N> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_with_ray(m, ¢ered_ray, max_toi, solid)
}
#[inline]
fn toi_and_normal_with_ray(
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> |
#[cfg(feature = "dim3")]
#[inline]
fn toi_and_normal_and_uv_with_ray(
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_and_uv_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
}
#[inline]
fn intersects_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N) -> bool {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).intersects_ray(&Isometry::identity(), ¢ered_ray, max_toi)
}
}
| {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
} | identifier_body |
ray_bounding_sphere.rs | use crate::bounding_volume::BoundingSphere;
use crate::math::Isometry;
use crate::query::{Ray, RayCast, RayIntersection};
use crate::shape::Ball;
use na::RealField;
impl<N: RealField> RayCast<N> for BoundingSphere<N> {
#[inline]
fn toi_with_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N, solid: bool) -> Option<N> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_with_ray(m, ¢ered_ray, max_toi, solid)
}
#[inline]
fn toi_and_normal_with_ray(
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
}
#[cfg(feature = "dim3")]
#[inline]
fn toi_and_normal_and_uv_with_ray(
&self,
m: &Isometry<N>,
ray: &Ray<N>,
max_toi: N,
solid: bool,
) -> Option<RayIntersection<N>> {
let centered_ray = ray.translate_by(-(m * self.center()).coords);
Ball::new(self.radius()).toi_and_normal_and_uv_with_ray(
&Isometry::identity(),
¢ered_ray,
max_toi,
solid,
)
}
#[inline] |
Ball::new(self.radius()).intersects_ray(&Isometry::identity(), ¢ered_ray, max_toi)
}
} | fn intersects_ray(&self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N) -> bool {
let centered_ray = ray.translate_by(-(m * self.center()).coords); | random_line_split |
regions-creating-enums5.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(non_camel_case_types)]
// pretty-expanded FIXME #23616
enum | <'a> {
num(usize),
add(&'a ast<'a>, &'a ast<'a>)
}
fn mk_add_ok<'a>(x: &'a ast<'a>, y: &'a ast<'a>, _z: &ast) -> ast<'a> {
ast::add(x, y)
}
pub fn main() {
}
| ast | identifier_name |
regions-creating-enums5.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(non_camel_case_types)]
// pretty-expanded FIXME #23616
enum ast<'a> {
num(usize),
add(&'a ast<'a>, &'a ast<'a>)
}
fn mk_add_ok<'a>(x: &'a ast<'a>, y: &'a ast<'a>, _z: &ast) -> ast<'a> {
ast::add(x, y)
}
pub fn main() | {
} | identifier_body |
|
regions-creating-enums5.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | #![allow(dead_code)]
#![allow(non_camel_case_types)]
// pretty-expanded FIXME #23616
enum ast<'a> {
num(usize),
add(&'a ast<'a>, &'a ast<'a>)
}
fn mk_add_ok<'a>(x: &'a ast<'a>, y: &'a ast<'a>, _z: &ast) -> ast<'a> {
ast::add(x, y)
}
pub fn main() {
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass | random_line_split |
archive.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A helper class for dealing with static archives
use back::link::{get_ar_prog};
use driver::session::Session;
use metadata::filesearch;
use lib::llvm::{ArchiveRef, llvm};
use libc;
use std::io::process::{Command, ProcessOutput};
use std::io::{fs, TempDir};
use std::io;
use std::mem;
use std::os;
use std::raw;
use std::str;
use syntax::abi;
pub static METADATA_FILENAME: &'static str = "rust.metadata.bin";
pub struct Archive<'a> {
sess: &'a Session,
dst: Path,
}
pub struct ArchiveRO {
ptr: ArchiveRef,
}
fn run_ar(sess: &Session, args: &str, cwd: Option<&Path>,
paths: &[&Path]) -> ProcessOutput {
let ar = get_ar_prog(sess);
let mut cmd = Command::new(ar.as_slice());
cmd.arg(args).args(paths);
debug!("{}", cmd);
match cwd {
Some(p) => {
cmd.cwd(p);
debug!("inside {}", p.display());
}
None => {}
}
match cmd.spawn() {
Ok(prog) => {
let o = prog.wait_with_output().unwrap();
if!o.status.success() {
sess.err(format!("{} failed with: {}", cmd, o.status));
sess.note(format!("stdout ---\n{}",
str::from_utf8(o.output.as_slice()).unwrap()));
sess.note(format!("stderr ---\n{}",
str::from_utf8(o.error.as_slice()).unwrap()));
sess.abort_if_errors();
}
o
},
Err(e) => {
sess.err(format!("could not exec `{}`: {}", ar.as_slice(), e));
sess.abort_if_errors();
fail!("rustc::back::archive::run_ar() should not reach this point");
}
}
}
impl<'a> Archive<'a> {
/// Initializes a new static archive with the given object file
pub fn create<'b>(sess: &'a Session, dst: &'b Path,
initial_object: &'b Path) -> Archive<'a> {
run_ar(sess, "crus", None, [dst, initial_object]);
Archive { sess: sess, dst: dst.clone() }
}
/// Opens an existing static archive
pub fn open(sess: &'a Session, dst: Path) -> Archive<'a> {
assert!(dst.exists());
Archive { sess: sess, dst: dst }
}
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
pub fn add_native_library(&mut self, name: &str) -> io::IoResult<()> {
let location = self.find_library(name);
self.add_archive(&location, name, [])
}
/// Adds all of the contents of the rlib at the specified path to this
/// archive.
///
/// This ignores adding the bytecode from the rlib, and if LTO is enabled
/// then the object file also isn't added.
pub fn add_rlib(&mut self, rlib: &Path, name: &str,
lto: bool) -> io::IoResult<()> {
let object = format!("{}.o", name);
let bytecode = format!("{}.bc.deflate", name);
let mut ignore = vec!(METADATA_FILENAME, bytecode.as_slice());
if lto {
ignore.push(object.as_slice());
}
self.add_archive(rlib, name, ignore.as_slice())
}
/// Adds an arbitrary file to this archive
pub fn add_file(&mut self, file: &Path, has_symbols: bool) {
let cmd = if has_symbols {"r"} else {"rS"};
run_ar(self.sess, cmd, None, [&self.dst, file]);
}
/// Removes a file from this archive
pub fn remove_file(&mut self, file: &str) {
run_ar(self.sess, "d", None, [&self.dst, &Path::new(file)]);
}
/// Updates all symbols in the archive (runs 'ar s' over it)
pub fn update_symbols(&mut self) {
run_ar(self.sess, "s", None, [&self.dst]);
}
/// Lists all files in an archive
pub fn files(&self) -> Vec<StrBuf> {
let output = run_ar(self.sess, "t", None, [&self.dst]);
let output = str::from_utf8(output.output.as_slice()).unwrap();
// use lines_any because windows delimits output with `\r\n` instead of
// just `\n`
output.lines_any().map(|s| s.to_strbuf()).collect()
}
fn add_archive(&mut self, archive: &Path, name: &str,
skip: &[&str]) -> io::IoResult<()> {
let loc = TempDir::new("rsar").unwrap();
// First, extract the contents of the archive to a temporary directory
let archive = os::make_absolute(archive);
run_ar(self.sess, "x", Some(loc.path()), [&archive]);
// Next, we must rename all of the inputs to "guaranteed unique names".
// The reason for this is that archives are keyed off the name of the
// files, so if two files have the same name they will override one
// another in the archive (bad).
//
// We skip any files explicitly desired for skipping, and we also skip
// all SYMDEF files as these are just magical placeholders which get
// re-created when we make a new archive anyway.
let files = try!(fs::readdir(loc.path()));
let mut inputs = Vec::new();
for file in files.iter() {
let filename = file.filename_str().unwrap();
if skip.iter().any(|s| *s == filename) { continue }
if filename.contains(".SYMDEF") { continue }
let filename = format!("r-{}-{}", name, filename);
let new_filename = file.with_filename(filename);
try!(fs::rename(file, &new_filename));
inputs.push(new_filename);
}
if inputs.len() == 0 { return Ok(()) }
// Finally, add all the renamed files to this archive
let mut args = vec!(&self.dst);
args.extend(inputs.iter());
run_ar(self.sess, "r", None, args.as_slice());
Ok(())
}
fn find_library(&self, name: &str) -> Path {
let (osprefix, osext) = match self.sess.targ_cfg.os {
abi::OsWin32 => ("", "lib"), _ => ("lib", "a"),
};
// On Windows, static libraries sometimes show up as libfoo.a and other
// times show up as foo.lib
let oslibname = format!("{}{}.{}", osprefix, name, osext);
let unixlibname = format!("lib{}.a", name);
let mut rustpath = filesearch::rust_path();
rustpath.push(self.sess.target_filesearch().get_lib_path());
let search = self.sess.opts.addl_lib_search_paths.borrow();
for path in search.iter().chain(rustpath.iter()) {
debug!("looking for {} inside {}", name, path.display());
let test = path.join(oslibname.as_slice());
if test.exists() { return test }
if oslibname!= unixlibname {
let test = path.join(unixlibname.as_slice());
if test.exists() { return test }
}
}
self.sess.fatal(format!("could not find native static library `{}`, \
perhaps an -L flag is missing?", name));
}
}
impl ArchiveRO {
/// Opens a static archive for read-only purposes. This is more optimized
/// than the `open` method because it uses LLVM's internal `Archive` class
/// rather than shelling out to `ar` for everything.
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Option<ArchiveRO> {
unsafe {
let ar = dst.with_c_str(|dst| {
llvm::LLVMRustOpenArchive(dst)
});
if ar.is_null() {
None
} else {
Some(ArchiveRO { ptr: ar })
}
}
}
/// Reads a file in the archive
pub fn | <'a>(&'a self, file: &str) -> Option<&'a [u8]> {
unsafe {
let mut size = 0 as libc::size_t;
let ptr = file.with_c_str(|file| {
llvm::LLVMRustArchiveReadSection(self.ptr, file, &mut size)
});
if ptr.is_null() {
None
} else {
Some(mem::transmute(raw::Slice {
data: ptr,
len: size as uint,
}))
}
}
}
}
impl Drop for ArchiveRO {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDestroyArchive(self.ptr);
}
}
}
| read | identifier_name |
archive.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A helper class for dealing with static archives
use back::link::{get_ar_prog};
use driver::session::Session;
use metadata::filesearch;
use lib::llvm::{ArchiveRef, llvm};
use libc;
use std::io::process::{Command, ProcessOutput};
use std::io::{fs, TempDir};
use std::io;
use std::mem;
use std::os;
use std::raw;
use std::str;
use syntax::abi;
pub static METADATA_FILENAME: &'static str = "rust.metadata.bin";
pub struct Archive<'a> {
sess: &'a Session,
dst: Path,
}
pub struct ArchiveRO {
ptr: ArchiveRef,
}
fn run_ar(sess: &Session, args: &str, cwd: Option<&Path>,
paths: &[&Path]) -> ProcessOutput {
let ar = get_ar_prog(sess);
let mut cmd = Command::new(ar.as_slice());
cmd.arg(args).args(paths);
debug!("{}", cmd);
match cwd {
Some(p) => {
cmd.cwd(p);
debug!("inside {}", p.display());
}
None => {}
}
match cmd.spawn() {
Ok(prog) => {
let o = prog.wait_with_output().unwrap();
if!o.status.success() {
sess.err(format!("{} failed with: {}", cmd, o.status));
sess.note(format!("stdout ---\n{}",
str::from_utf8(o.output.as_slice()).unwrap()));
sess.note(format!("stderr ---\n{}",
str::from_utf8(o.error.as_slice()).unwrap()));
sess.abort_if_errors();
}
o
},
Err(e) => {
sess.err(format!("could not exec `{}`: {}", ar.as_slice(), e));
sess.abort_if_errors();
fail!("rustc::back::archive::run_ar() should not reach this point");
}
}
}
impl<'a> Archive<'a> {
/// Initializes a new static archive with the given object file
pub fn create<'b>(sess: &'a Session, dst: &'b Path,
initial_object: &'b Path) -> Archive<'a> {
run_ar(sess, "crus", None, [dst, initial_object]);
Archive { sess: sess, dst: dst.clone() }
}
/// Opens an existing static archive
pub fn open(sess: &'a Session, dst: Path) -> Archive<'a> {
assert!(dst.exists());
Archive { sess: sess, dst: dst }
}
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
pub fn add_native_library(&mut self, name: &str) -> io::IoResult<()> {
let location = self.find_library(name);
self.add_archive(&location, name, [])
}
/// Adds all of the contents of the rlib at the specified path to this
/// archive.
///
/// This ignores adding the bytecode from the rlib, and if LTO is enabled
/// then the object file also isn't added.
pub fn add_rlib(&mut self, rlib: &Path, name: &str,
lto: bool) -> io::IoResult<()> {
let object = format!("{}.o", name);
let bytecode = format!("{}.bc.deflate", name);
let mut ignore = vec!(METADATA_FILENAME, bytecode.as_slice());
if lto {
ignore.push(object.as_slice());
}
self.add_archive(rlib, name, ignore.as_slice())
}
/// Adds an arbitrary file to this archive
pub fn add_file(&mut self, file: &Path, has_symbols: bool) {
let cmd = if has_symbols {"r"} else {"rS"};
run_ar(self.sess, cmd, None, [&self.dst, file]);
}
/// Removes a file from this archive
pub fn remove_file(&mut self, file: &str) {
run_ar(self.sess, "d", None, [&self.dst, &Path::new(file)]);
}
/// Updates all symbols in the archive (runs 'ar s' over it)
pub fn update_symbols(&mut self) {
run_ar(self.sess, "s", None, [&self.dst]);
}
/// Lists all files in an archive
pub fn files(&self) -> Vec<StrBuf> {
let output = run_ar(self.sess, "t", None, [&self.dst]);
let output = str::from_utf8(output.output.as_slice()).unwrap();
// use lines_any because windows delimits output with `\r\n` instead of
// just `\n`
output.lines_any().map(|s| s.to_strbuf()).collect()
}
fn add_archive(&mut self, archive: &Path, name: &str,
skip: &[&str]) -> io::IoResult<()> {
let loc = TempDir::new("rsar").unwrap();
// First, extract the contents of the archive to a temporary directory
let archive = os::make_absolute(archive);
run_ar(self.sess, "x", Some(loc.path()), [&archive]);
// Next, we must rename all of the inputs to "guaranteed unique names".
// The reason for this is that archives are keyed off the name of the
// files, so if two files have the same name they will override one
// another in the archive (bad).
//
// We skip any files explicitly desired for skipping, and we also skip
// all SYMDEF files as these are just magical placeholders which get
// re-created when we make a new archive anyway.
let files = try!(fs::readdir(loc.path()));
let mut inputs = Vec::new();
for file in files.iter() {
let filename = file.filename_str().unwrap();
if skip.iter().any(|s| *s == filename) { continue }
if filename.contains(".SYMDEF") { continue }
let filename = format!("r-{}-{}", name, filename);
let new_filename = file.with_filename(filename);
try!(fs::rename(file, &new_filename));
inputs.push(new_filename);
}
if inputs.len() == 0 { return Ok(()) }
// Finally, add all the renamed files to this archive
let mut args = vec!(&self.dst);
args.extend(inputs.iter());
run_ar(self.sess, "r", None, args.as_slice());
Ok(())
}
fn find_library(&self, name: &str) -> Path {
let (osprefix, osext) = match self.sess.targ_cfg.os {
abi::OsWin32 => ("", "lib"), _ => ("lib", "a"),
};
// On Windows, static libraries sometimes show up as libfoo.a and other
// times show up as foo.lib
let oslibname = format!("{}{}.{}", osprefix, name, osext);
let unixlibname = format!("lib{}.a", name);
let mut rustpath = filesearch::rust_path();
rustpath.push(self.sess.target_filesearch().get_lib_path());
let search = self.sess.opts.addl_lib_search_paths.borrow();
for path in search.iter().chain(rustpath.iter()) {
debug!("looking for {} inside {}", name, path.display());
let test = path.join(oslibname.as_slice());
if test.exists() { return test }
if oslibname!= unixlibname {
let test = path.join(unixlibname.as_slice());
if test.exists() { return test }
}
}
self.sess.fatal(format!("could not find native static library `{}`, \
perhaps an -L flag is missing?", name));
}
}
impl ArchiveRO {
/// Opens a static archive for read-only purposes. This is more optimized
/// than the `open` method because it uses LLVM's internal `Archive` class
/// rather than shelling out to `ar` for everything.
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Option<ArchiveRO> |
/// Reads a file in the archive
pub fn read<'a>(&'a self, file: &str) -> Option<&'a [u8]> {
unsafe {
let mut size = 0 as libc::size_t;
let ptr = file.with_c_str(|file| {
llvm::LLVMRustArchiveReadSection(self.ptr, file, &mut size)
});
if ptr.is_null() {
None
} else {
Some(mem::transmute(raw::Slice {
data: ptr,
len: size as uint,
}))
}
}
}
}
impl Drop for ArchiveRO {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDestroyArchive(self.ptr);
}
}
}
| {
unsafe {
let ar = dst.with_c_str(|dst| {
llvm::LLVMRustOpenArchive(dst)
});
if ar.is_null() {
None
} else {
Some(ArchiveRO { ptr: ar })
}
}
} | identifier_body |
archive.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A helper class for dealing with static archives
use back::link::{get_ar_prog};
use driver::session::Session;
use metadata::filesearch;
use lib::llvm::{ArchiveRef, llvm};
use libc;
use std::io::process::{Command, ProcessOutput};
use std::io::{fs, TempDir};
use std::io;
use std::mem;
use std::os;
use std::raw;
use std::str;
use syntax::abi;
pub static METADATA_FILENAME: &'static str = "rust.metadata.bin";
pub struct Archive<'a> {
sess: &'a Session,
dst: Path,
}
pub struct ArchiveRO {
ptr: ArchiveRef,
}
fn run_ar(sess: &Session, args: &str, cwd: Option<&Path>,
paths: &[&Path]) -> ProcessOutput {
let ar = get_ar_prog(sess);
let mut cmd = Command::new(ar.as_slice());
cmd.arg(args).args(paths);
debug!("{}", cmd);
match cwd {
Some(p) => {
cmd.cwd(p);
debug!("inside {}", p.display());
}
None => {}
} | match cmd.spawn() {
Ok(prog) => {
let o = prog.wait_with_output().unwrap();
if!o.status.success() {
sess.err(format!("{} failed with: {}", cmd, o.status));
sess.note(format!("stdout ---\n{}",
str::from_utf8(o.output.as_slice()).unwrap()));
sess.note(format!("stderr ---\n{}",
str::from_utf8(o.error.as_slice()).unwrap()));
sess.abort_if_errors();
}
o
},
Err(e) => {
sess.err(format!("could not exec `{}`: {}", ar.as_slice(), e));
sess.abort_if_errors();
fail!("rustc::back::archive::run_ar() should not reach this point");
}
}
}
impl<'a> Archive<'a> {
/// Initializes a new static archive with the given object file
pub fn create<'b>(sess: &'a Session, dst: &'b Path,
initial_object: &'b Path) -> Archive<'a> {
run_ar(sess, "crus", None, [dst, initial_object]);
Archive { sess: sess, dst: dst.clone() }
}
/// Opens an existing static archive
pub fn open(sess: &'a Session, dst: Path) -> Archive<'a> {
assert!(dst.exists());
Archive { sess: sess, dst: dst }
}
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
pub fn add_native_library(&mut self, name: &str) -> io::IoResult<()> {
let location = self.find_library(name);
self.add_archive(&location, name, [])
}
/// Adds all of the contents of the rlib at the specified path to this
/// archive.
///
/// This ignores adding the bytecode from the rlib, and if LTO is enabled
/// then the object file also isn't added.
pub fn add_rlib(&mut self, rlib: &Path, name: &str,
lto: bool) -> io::IoResult<()> {
let object = format!("{}.o", name);
let bytecode = format!("{}.bc.deflate", name);
let mut ignore = vec!(METADATA_FILENAME, bytecode.as_slice());
if lto {
ignore.push(object.as_slice());
}
self.add_archive(rlib, name, ignore.as_slice())
}
/// Adds an arbitrary file to this archive
pub fn add_file(&mut self, file: &Path, has_symbols: bool) {
let cmd = if has_symbols {"r"} else {"rS"};
run_ar(self.sess, cmd, None, [&self.dst, file]);
}
/// Removes a file from this archive
pub fn remove_file(&mut self, file: &str) {
run_ar(self.sess, "d", None, [&self.dst, &Path::new(file)]);
}
/// Updates all symbols in the archive (runs 'ar s' over it)
pub fn update_symbols(&mut self) {
run_ar(self.sess, "s", None, [&self.dst]);
}
/// Lists all files in an archive
pub fn files(&self) -> Vec<StrBuf> {
let output = run_ar(self.sess, "t", None, [&self.dst]);
let output = str::from_utf8(output.output.as_slice()).unwrap();
// use lines_any because windows delimits output with `\r\n` instead of
// just `\n`
output.lines_any().map(|s| s.to_strbuf()).collect()
}
fn add_archive(&mut self, archive: &Path, name: &str,
skip: &[&str]) -> io::IoResult<()> {
let loc = TempDir::new("rsar").unwrap();
// First, extract the contents of the archive to a temporary directory
let archive = os::make_absolute(archive);
run_ar(self.sess, "x", Some(loc.path()), [&archive]);
// Next, we must rename all of the inputs to "guaranteed unique names".
// The reason for this is that archives are keyed off the name of the
// files, so if two files have the same name they will override one
// another in the archive (bad).
//
// We skip any files explicitly desired for skipping, and we also skip
// all SYMDEF files as these are just magical placeholders which get
// re-created when we make a new archive anyway.
let files = try!(fs::readdir(loc.path()));
let mut inputs = Vec::new();
for file in files.iter() {
let filename = file.filename_str().unwrap();
if skip.iter().any(|s| *s == filename) { continue }
if filename.contains(".SYMDEF") { continue }
let filename = format!("r-{}-{}", name, filename);
let new_filename = file.with_filename(filename);
try!(fs::rename(file, &new_filename));
inputs.push(new_filename);
}
if inputs.len() == 0 { return Ok(()) }
// Finally, add all the renamed files to this archive
let mut args = vec!(&self.dst);
args.extend(inputs.iter());
run_ar(self.sess, "r", None, args.as_slice());
Ok(())
}
fn find_library(&self, name: &str) -> Path {
let (osprefix, osext) = match self.sess.targ_cfg.os {
abi::OsWin32 => ("", "lib"), _ => ("lib", "a"),
};
// On Windows, static libraries sometimes show up as libfoo.a and other
// times show up as foo.lib
let oslibname = format!("{}{}.{}", osprefix, name, osext);
let unixlibname = format!("lib{}.a", name);
let mut rustpath = filesearch::rust_path();
rustpath.push(self.sess.target_filesearch().get_lib_path());
let search = self.sess.opts.addl_lib_search_paths.borrow();
for path in search.iter().chain(rustpath.iter()) {
debug!("looking for {} inside {}", name, path.display());
let test = path.join(oslibname.as_slice());
if test.exists() { return test }
if oslibname!= unixlibname {
let test = path.join(unixlibname.as_slice());
if test.exists() { return test }
}
}
self.sess.fatal(format!("could not find native static library `{}`, \
perhaps an -L flag is missing?", name));
}
}
impl ArchiveRO {
/// Opens a static archive for read-only purposes. This is more optimized
/// than the `open` method because it uses LLVM's internal `Archive` class
/// rather than shelling out to `ar` for everything.
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Option<ArchiveRO> {
unsafe {
let ar = dst.with_c_str(|dst| {
llvm::LLVMRustOpenArchive(dst)
});
if ar.is_null() {
None
} else {
Some(ArchiveRO { ptr: ar })
}
}
}
/// Reads a file in the archive
pub fn read<'a>(&'a self, file: &str) -> Option<&'a [u8]> {
unsafe {
let mut size = 0 as libc::size_t;
let ptr = file.with_c_str(|file| {
llvm::LLVMRustArchiveReadSection(self.ptr, file, &mut size)
});
if ptr.is_null() {
None
} else {
Some(mem::transmute(raw::Slice {
data: ptr,
len: size as uint,
}))
}
}
}
}
impl Drop for ArchiveRO {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDestroyArchive(self.ptr);
}
}
} | random_line_split |
|
archive.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A helper class for dealing with static archives
use back::link::{get_ar_prog};
use driver::session::Session;
use metadata::filesearch;
use lib::llvm::{ArchiveRef, llvm};
use libc;
use std::io::process::{Command, ProcessOutput};
use std::io::{fs, TempDir};
use std::io;
use std::mem;
use std::os;
use std::raw;
use std::str;
use syntax::abi;
pub static METADATA_FILENAME: &'static str = "rust.metadata.bin";
pub struct Archive<'a> {
sess: &'a Session,
dst: Path,
}
pub struct ArchiveRO {
ptr: ArchiveRef,
}
fn run_ar(sess: &Session, args: &str, cwd: Option<&Path>,
paths: &[&Path]) -> ProcessOutput {
let ar = get_ar_prog(sess);
let mut cmd = Command::new(ar.as_slice());
cmd.arg(args).args(paths);
debug!("{}", cmd);
match cwd {
Some(p) => {
cmd.cwd(p);
debug!("inside {}", p.display());
}
None => {}
}
match cmd.spawn() {
Ok(prog) => {
let o = prog.wait_with_output().unwrap();
if!o.status.success() {
sess.err(format!("{} failed with: {}", cmd, o.status));
sess.note(format!("stdout ---\n{}",
str::from_utf8(o.output.as_slice()).unwrap()));
sess.note(format!("stderr ---\n{}",
str::from_utf8(o.error.as_slice()).unwrap()));
sess.abort_if_errors();
}
o
},
Err(e) => {
sess.err(format!("could not exec `{}`: {}", ar.as_slice(), e));
sess.abort_if_errors();
fail!("rustc::back::archive::run_ar() should not reach this point");
}
}
}
impl<'a> Archive<'a> {
/// Initializes a new static archive with the given object file
pub fn create<'b>(sess: &'a Session, dst: &'b Path,
initial_object: &'b Path) -> Archive<'a> {
run_ar(sess, "crus", None, [dst, initial_object]);
Archive { sess: sess, dst: dst.clone() }
}
/// Opens an existing static archive
pub fn open(sess: &'a Session, dst: Path) -> Archive<'a> {
assert!(dst.exists());
Archive { sess: sess, dst: dst }
}
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
pub fn add_native_library(&mut self, name: &str) -> io::IoResult<()> {
let location = self.find_library(name);
self.add_archive(&location, name, [])
}
/// Adds all of the contents of the rlib at the specified path to this
/// archive.
///
/// This ignores adding the bytecode from the rlib, and if LTO is enabled
/// then the object file also isn't added.
pub fn add_rlib(&mut self, rlib: &Path, name: &str,
lto: bool) -> io::IoResult<()> {
let object = format!("{}.o", name);
let bytecode = format!("{}.bc.deflate", name);
let mut ignore = vec!(METADATA_FILENAME, bytecode.as_slice());
if lto {
ignore.push(object.as_slice());
}
self.add_archive(rlib, name, ignore.as_slice())
}
/// Adds an arbitrary file to this archive
pub fn add_file(&mut self, file: &Path, has_symbols: bool) {
let cmd = if has_symbols {"r"} else | ;
run_ar(self.sess, cmd, None, [&self.dst, file]);
}
/// Removes a file from this archive
pub fn remove_file(&mut self, file: &str) {
run_ar(self.sess, "d", None, [&self.dst, &Path::new(file)]);
}
/// Updates all symbols in the archive (runs 'ar s' over it)
pub fn update_symbols(&mut self) {
run_ar(self.sess, "s", None, [&self.dst]);
}
/// Lists all files in an archive
pub fn files(&self) -> Vec<StrBuf> {
let output = run_ar(self.sess, "t", None, [&self.dst]);
let output = str::from_utf8(output.output.as_slice()).unwrap();
// use lines_any because windows delimits output with `\r\n` instead of
// just `\n`
output.lines_any().map(|s| s.to_strbuf()).collect()
}
fn add_archive(&mut self, archive: &Path, name: &str,
skip: &[&str]) -> io::IoResult<()> {
let loc = TempDir::new("rsar").unwrap();
// First, extract the contents of the archive to a temporary directory
let archive = os::make_absolute(archive);
run_ar(self.sess, "x", Some(loc.path()), [&archive]);
// Next, we must rename all of the inputs to "guaranteed unique names".
// The reason for this is that archives are keyed off the name of the
// files, so if two files have the same name they will override one
// another in the archive (bad).
//
// We skip any files explicitly desired for skipping, and we also skip
// all SYMDEF files as these are just magical placeholders which get
// re-created when we make a new archive anyway.
let files = try!(fs::readdir(loc.path()));
let mut inputs = Vec::new();
for file in files.iter() {
let filename = file.filename_str().unwrap();
if skip.iter().any(|s| *s == filename) { continue }
if filename.contains(".SYMDEF") { continue }
let filename = format!("r-{}-{}", name, filename);
let new_filename = file.with_filename(filename);
try!(fs::rename(file, &new_filename));
inputs.push(new_filename);
}
if inputs.len() == 0 { return Ok(()) }
// Finally, add all the renamed files to this archive
let mut args = vec!(&self.dst);
args.extend(inputs.iter());
run_ar(self.sess, "r", None, args.as_slice());
Ok(())
}
fn find_library(&self, name: &str) -> Path {
let (osprefix, osext) = match self.sess.targ_cfg.os {
abi::OsWin32 => ("", "lib"), _ => ("lib", "a"),
};
// On Windows, static libraries sometimes show up as libfoo.a and other
// times show up as foo.lib
let oslibname = format!("{}{}.{}", osprefix, name, osext);
let unixlibname = format!("lib{}.a", name);
let mut rustpath = filesearch::rust_path();
rustpath.push(self.sess.target_filesearch().get_lib_path());
let search = self.sess.opts.addl_lib_search_paths.borrow();
for path in search.iter().chain(rustpath.iter()) {
debug!("looking for {} inside {}", name, path.display());
let test = path.join(oslibname.as_slice());
if test.exists() { return test }
if oslibname!= unixlibname {
let test = path.join(unixlibname.as_slice());
if test.exists() { return test }
}
}
self.sess.fatal(format!("could not find native static library `{}`, \
perhaps an -L flag is missing?", name));
}
}
impl ArchiveRO {
/// Opens a static archive for read-only purposes. This is more optimized
/// than the `open` method because it uses LLVM's internal `Archive` class
/// rather than shelling out to `ar` for everything.
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Option<ArchiveRO> {
unsafe {
let ar = dst.with_c_str(|dst| {
llvm::LLVMRustOpenArchive(dst)
});
if ar.is_null() {
None
} else {
Some(ArchiveRO { ptr: ar })
}
}
}
/// Reads a file in the archive
pub fn read<'a>(&'a self, file: &str) -> Option<&'a [u8]> {
unsafe {
let mut size = 0 as libc::size_t;
let ptr = file.with_c_str(|file| {
llvm::LLVMRustArchiveReadSection(self.ptr, file, &mut size)
});
if ptr.is_null() {
None
} else {
Some(mem::transmute(raw::Slice {
data: ptr,
len: size as uint,
}))
}
}
}
}
impl Drop for ArchiveRO {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDestroyArchive(self.ptr);
}
}
}
| {"rS"} | conditional_block |
cci_nested_lib.rs | #![feature(box_syntax)]
use std::cell::RefCell;
pub struct Entry<A,B> {
key: A,
value: B
}
pub struct alist<A,B> {
eq_fn: extern "Rust" fn(A,A) -> bool,
data: Box<RefCell<Vec<Entry<A,B>>>>,
}
pub fn alist_add<A:'static,B:'static>(lst: &alist<A,B>, k: A, v: B) {
let mut data = lst.data.borrow_mut();
(*data).push(Entry{key:k, value:v});
}
pub fn | <A:Clone +'static,
B:Clone +'static>(
lst: &alist<A,B>,
k: A)
-> B {
let eq_fn = lst.eq_fn;
let data = lst.data.borrow();
for entry in &(*data) {
if eq_fn(entry.key.clone(), k.clone()) {
return entry.value.clone();
}
}
panic!();
}
#[inline]
pub fn new_int_alist<B:'static>() -> alist<isize, B> {
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
};
}
#[inline]
pub fn new_int_alist_2<B:'static>() -> alist<isize, B> {
#[inline]
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
};
}
| alist_get | identifier_name |
cci_nested_lib.rs | #![feature(box_syntax)]
use std::cell::RefCell;
pub struct Entry<A,B> {
key: A,
value: B
}
pub struct alist<A,B> {
eq_fn: extern "Rust" fn(A,A) -> bool,
data: Box<RefCell<Vec<Entry<A,B>>>>,
}
pub fn alist_add<A:'static,B:'static>(lst: &alist<A,B>, k: A, v: B) {
let mut data = lst.data.borrow_mut();
(*data).push(Entry{key:k, value:v});
}
pub fn alist_get<A:Clone +'static,
B:Clone +'static>(
lst: &alist<A,B>,
k: A)
-> B {
let eq_fn = lst.eq_fn;
let data = lst.data.borrow();
for entry in &(*data) {
if eq_fn(entry.key.clone(), k.clone()) |
}
panic!();
}
#[inline]
pub fn new_int_alist<B:'static>() -> alist<isize, B> {
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
};
}
#[inline]
pub fn new_int_alist_2<B:'static>() -> alist<isize, B> {
#[inline]
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
};
}
| {
return entry.value.clone();
} | conditional_block |
cci_nested_lib.rs | #![feature(box_syntax)]
use std::cell::RefCell;
pub struct Entry<A,B> {
key: A,
value: B
}
pub struct alist<A,B> {
eq_fn: extern "Rust" fn(A,A) -> bool,
data: Box<RefCell<Vec<Entry<A,B>>>>,
}
pub fn alist_add<A:'static,B:'static>(lst: &alist<A,B>, k: A, v: B) {
let mut data = lst.data.borrow_mut();
(*data).push(Entry{key:k, value:v});
}
pub fn alist_get<A:Clone +'static,
B:Clone +'static>(
lst: &alist<A,B>,
k: A)
-> B {
let eq_fn = lst.eq_fn;
let data = lst.data.borrow();
for entry in &(*data) {
if eq_fn(entry.key.clone(), k.clone()) {
return entry.value.clone();
}
}
panic!();
}
#[inline]
pub fn new_int_alist<B:'static>() -> alist<isize, B> {
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
}; |
#[inline]
pub fn new_int_alist_2<B:'static>() -> alist<isize, B> {
#[inline]
fn eq_int(a: isize, b: isize) -> bool { a == b }
return alist {
eq_fn: eq_int,
data: box RefCell::new(Vec::new()),
};
} | } | random_line_split |
audio-whitenoise.rs | extern crate sdl2;
extern crate rand;
use sdl2::audio::{AudioCallback, AudioSpecDesired};
use std::time::Duration;
struct MyCallback {
volume: f32
}
impl AudioCallback for MyCallback {
type Channel = f32;
fn callback(&mut self, out: &mut [f32]) |
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
let audio_subsystem = sdl_context.audio()?;
let desired_spec = AudioSpecDesired {
freq: Some(44_100),
channels: Some(1), // mono
samples: None, // default sample size
};
// None: use default device
let mut device = audio_subsystem.open_playback(None, &desired_spec, |spec| {
// Show obtained AudioSpec
println!("{:?}", spec);
MyCallback { volume: 0.5 }
})?;
// Start playback
device.resume();
// Play for 1 second
std::thread::sleep(Duration::from_millis(1_000));
{
// Acquire a lock. This lets us read and modify callback data.
let mut lock = device.lock();
(*lock).volume = 0.25;
// Lock guard is dropped here
}
// Play for another second
std::thread::sleep(Duration::from_millis(1_000));
// Device is automatically closed when dropped
Ok(())
}
| {
use self::rand::{Rng, thread_rng};
let mut rng = thread_rng();
// Generate white noise
for x in out.iter_mut() {
*x = (rng.gen_range(0.0, 2.0) - 1.0) * self.volume;
}
} | identifier_body |
audio-whitenoise.rs | extern crate sdl2;
extern crate rand;
use sdl2::audio::{AudioCallback, AudioSpecDesired};
use std::time::Duration;
struct | {
volume: f32
}
impl AudioCallback for MyCallback {
type Channel = f32;
fn callback(&mut self, out: &mut [f32]) {
use self::rand::{Rng, thread_rng};
let mut rng = thread_rng();
// Generate white noise
for x in out.iter_mut() {
*x = (rng.gen_range(0.0, 2.0) - 1.0) * self.volume;
}
}
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
let audio_subsystem = sdl_context.audio()?;
let desired_spec = AudioSpecDesired {
freq: Some(44_100),
channels: Some(1), // mono
samples: None, // default sample size
};
// None: use default device
let mut device = audio_subsystem.open_playback(None, &desired_spec, |spec| {
// Show obtained AudioSpec
println!("{:?}", spec);
MyCallback { volume: 0.5 }
})?;
// Start playback
device.resume();
// Play for 1 second
std::thread::sleep(Duration::from_millis(1_000));
{
// Acquire a lock. This lets us read and modify callback data.
let mut lock = device.lock();
(*lock).volume = 0.25;
// Lock guard is dropped here
}
// Play for another second
std::thread::sleep(Duration::from_millis(1_000));
// Device is automatically closed when dropped
Ok(())
}
| MyCallback | identifier_name |
audio-whitenoise.rs | extern crate sdl2; | use std::time::Duration;
struct MyCallback {
volume: f32
}
impl AudioCallback for MyCallback {
type Channel = f32;
fn callback(&mut self, out: &mut [f32]) {
use self::rand::{Rng, thread_rng};
let mut rng = thread_rng();
// Generate white noise
for x in out.iter_mut() {
*x = (rng.gen_range(0.0, 2.0) - 1.0) * self.volume;
}
}
}
fn main() -> Result<(), String> {
let sdl_context = sdl2::init()?;
let audio_subsystem = sdl_context.audio()?;
let desired_spec = AudioSpecDesired {
freq: Some(44_100),
channels: Some(1), // mono
samples: None, // default sample size
};
// None: use default device
let mut device = audio_subsystem.open_playback(None, &desired_spec, |spec| {
// Show obtained AudioSpec
println!("{:?}", spec);
MyCallback { volume: 0.5 }
})?;
// Start playback
device.resume();
// Play for 1 second
std::thread::sleep(Duration::from_millis(1_000));
{
// Acquire a lock. This lets us read and modify callback data.
let mut lock = device.lock();
(*lock).volume = 0.25;
// Lock guard is dropped here
}
// Play for another second
std::thread::sleep(Duration::from_millis(1_000));
// Device is automatically closed when dropped
Ok(())
} | extern crate rand;
use sdl2::audio::{AudioCallback, AudioSpecDesired}; | random_line_split |
look.rs | // Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use range_map::{Range, RangeSet};
use std::cmp::Ordering;
use unicode::PERLW;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Ord)]
pub enum Look {
Full,
WordChar,
NotWordChar,
NewLine,
Boundary,
Empty,
}
lazy_static! {
static ref FULL: RangeSet<u32> = RangeSet::full();
static ref WORD_CHAR: RangeSet<u32> =
PERLW.iter().map(|&(x, y)| Range::new(x as u32, y as u32)).collect();
static ref NOT_WORD_CHAR: RangeSet<u32> = WORD_CHAR.negated();
static ref NEW_LINE: RangeSet<u32> = RangeSet::single('\n' as u32);
static ref EMPTY: RangeSet<u32> = RangeSet::new();
}
static ALL: [Look; 6] = [Look::Full, Look::WordChar, Look::NotWordChar,
Look::NewLine, Look::Boundary, Look::Empty];
impl PartialOrd for Look {
fn partial_cmp(&self, other: &Look) -> Option<Ordering> {
if self == other {
Some(Ordering::Equal)
} else if self.intersection(other) == *self {
Some(Ordering::Less)
} else if self.intersection(other) == *other {
Some(Ordering::Greater)
} else {
None
}
}
}
impl Look {
pub fn intersection(&self, other: &Look) -> Look {
use self::Look::*;
match *self {
Full => *other,
WordChar => match *other {
Full => WordChar,
WordChar => WordChar,
_ => Empty,
},
NotWordChar => match *other {
Full => NotWordChar,
NotWordChar => NotWordChar,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
NewLine => match *other {
Full => NewLine,
NotWordChar => NewLine,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
Boundary => match *other {
WordChar => Empty,
Empty => Empty,
_ => Boundary,
},
Empty => Empty,
}
}
pub fn supersets(&self) -> Vec<Look> {
ALL.iter().cloned().filter(|x| *self <= *x).collect()
}
pub fn as_set(&self) -> &RangeSet<u32> {
use self::Look::*;
match *self {
Full => &FULL,
WordChar => &WORD_CHAR,
NotWordChar => &NOT_WORD_CHAR,
NewLine => &NEW_LINE,
Boundary => &EMPTY,
Empty => &EMPTY,
}
}
pub fn allows_eoi(&self) -> bool {
use self::Look::*;
match *self {
Full => true,
WordChar => false,
NotWordChar => true,
NewLine => true,
Boundary => true,
Empty => false,
}
}
pub fn is_full(&self) -> bool {
match *self {
Look::Full => true,
_ => false,
}
}
pub fn as_usize(&self) -> usize {
use self::Look::*;
match *self {
Full => 0,
WordChar => 1,
NotWordChar => 2,
NewLine => 3,
Boundary => 4,
Empty => 5,
}
}
pub fn num() -> usize { 6 }
pub fn all() -> &'static [Look] {
&ALL
}
}
#[cfg(test)]
mod tests {
use quickcheck::{Arbitrary, Gen, quickcheck};
use super::*;
impl Arbitrary for Look {
fn arbitrary<G: Gen>(g: &mut G) -> Look {
use look::Look::*;
*g.choose(&[Full, WordChar, NotWordChar, NewLine, Boundary, Empty]).unwrap()
}
}
#[test]
fn intersection_commutes() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b) == b.intersection(&a)
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_ordering() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b) <= a
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_eoi() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b).allows_eoi() == (a.allows_eoi() && b.allows_eoi())
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_set() {
fn prop(a: Look, b: Look) -> bool |
quickcheck(prop as fn(_, _) -> _);
}
}
| {
a.intersection(&b).as_set() == &a.as_set().intersection(b.as_set())
} | identifier_body |
look.rs | // Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use range_map::{Range, RangeSet};
use std::cmp::Ordering;
use unicode::PERLW;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Ord)]
pub enum Look {
Full,
WordChar,
NotWordChar,
NewLine,
Boundary,
Empty,
}
lazy_static! {
static ref FULL: RangeSet<u32> = RangeSet::full();
static ref WORD_CHAR: RangeSet<u32> =
PERLW.iter().map(|&(x, y)| Range::new(x as u32, y as u32)).collect();
static ref NOT_WORD_CHAR: RangeSet<u32> = WORD_CHAR.negated();
static ref NEW_LINE: RangeSet<u32> = RangeSet::single('\n' as u32);
static ref EMPTY: RangeSet<u32> = RangeSet::new();
}
static ALL: [Look; 6] = [Look::Full, Look::WordChar, Look::NotWordChar,
Look::NewLine, Look::Boundary, Look::Empty];
impl PartialOrd for Look {
fn partial_cmp(&self, other: &Look) -> Option<Ordering> {
if self == other {
Some(Ordering::Equal)
} else if self.intersection(other) == *self {
Some(Ordering::Less)
} else if self.intersection(other) == *other {
Some(Ordering::Greater)
} else {
None
}
}
}
impl Look {
pub fn intersection(&self, other: &Look) -> Look {
use self::Look::*;
match *self {
Full => *other,
WordChar => match *other {
Full => WordChar,
WordChar => WordChar,
_ => Empty,
},
NotWordChar => match *other {
Full => NotWordChar,
NotWordChar => NotWordChar,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
NewLine => match *other {
Full => NewLine,
NotWordChar => NewLine,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
Boundary => match *other {
WordChar => Empty,
Empty => Empty,
_ => Boundary,
},
Empty => Empty,
}
}
pub fn supersets(&self) -> Vec<Look> {
ALL.iter().cloned().filter(|x| *self <= *x).collect()
}
pub fn as_set(&self) -> &RangeSet<u32> {
use self::Look::*;
match *self {
Full => &FULL,
WordChar => &WORD_CHAR,
NotWordChar => &NOT_WORD_CHAR,
NewLine => &NEW_LINE,
Boundary => &EMPTY,
Empty => &EMPTY,
}
}
pub fn allows_eoi(&self) -> bool {
use self::Look::*;
match *self {
Full => true,
WordChar => false,
NotWordChar => true,
NewLine => true,
Boundary => true,
Empty => false,
}
}
pub fn is_full(&self) -> bool {
match *self {
Look::Full => true,
_ => false,
}
}
pub fn as_usize(&self) -> usize {
use self::Look::*;
match *self {
Full => 0,
WordChar => 1,
NotWordChar => 2,
NewLine => 3,
Boundary => 4,
Empty => 5,
}
}
pub fn num() -> usize { 6 }
pub fn all() -> &'static [Look] {
&ALL
}
}
#[cfg(test)]
mod tests {
use quickcheck::{Arbitrary, Gen, quickcheck};
use super::*;
impl Arbitrary for Look {
fn arbitrary<G: Gen>(g: &mut G) -> Look {
use look::Look::*;
*g.choose(&[Full, WordChar, NotWordChar, NewLine, Boundary, Empty]).unwrap()
}
}
#[test]
fn intersection_commutes() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b) == b.intersection(&a)
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_ordering() {
fn | (a: Look, b: Look) -> bool {
a.intersection(&b) <= a
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_eoi() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b).allows_eoi() == (a.allows_eoi() && b.allows_eoi())
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_set() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b).as_set() == &a.as_set().intersection(b.as_set())
}
quickcheck(prop as fn(_, _) -> _);
}
}
| prop | identifier_name |
look.rs | // Copyright 2015-2016 Joe Neeman.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
use range_map::{Range, RangeSet};
use std::cmp::Ordering;
use unicode::PERLW;
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Ord)]
pub enum Look {
Full,
WordChar,
NotWordChar,
NewLine,
Boundary,
Empty,
}
lazy_static! {
static ref FULL: RangeSet<u32> = RangeSet::full();
static ref WORD_CHAR: RangeSet<u32> =
PERLW.iter().map(|&(x, y)| Range::new(x as u32, y as u32)).collect();
static ref NOT_WORD_CHAR: RangeSet<u32> = WORD_CHAR.negated();
static ref NEW_LINE: RangeSet<u32> = RangeSet::single('\n' as u32);
static ref EMPTY: RangeSet<u32> = RangeSet::new();
}
static ALL: [Look; 6] = [Look::Full, Look::WordChar, Look::NotWordChar,
Look::NewLine, Look::Boundary, Look::Empty];
impl PartialOrd for Look {
fn partial_cmp(&self, other: &Look) -> Option<Ordering> {
if self == other {
Some(Ordering::Equal) | None
}
}
}
impl Look {
pub fn intersection(&self, other: &Look) -> Look {
use self::Look::*;
match *self {
Full => *other,
WordChar => match *other {
Full => WordChar,
WordChar => WordChar,
_ => Empty,
},
NotWordChar => match *other {
Full => NotWordChar,
NotWordChar => NotWordChar,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
NewLine => match *other {
Full => NewLine,
NotWordChar => NewLine,
NewLine => NewLine,
Boundary => Boundary,
_ => Empty,
},
Boundary => match *other {
WordChar => Empty,
Empty => Empty,
_ => Boundary,
},
Empty => Empty,
}
}
pub fn supersets(&self) -> Vec<Look> {
ALL.iter().cloned().filter(|x| *self <= *x).collect()
}
pub fn as_set(&self) -> &RangeSet<u32> {
use self::Look::*;
match *self {
Full => &FULL,
WordChar => &WORD_CHAR,
NotWordChar => &NOT_WORD_CHAR,
NewLine => &NEW_LINE,
Boundary => &EMPTY,
Empty => &EMPTY,
}
}
pub fn allows_eoi(&self) -> bool {
use self::Look::*;
match *self {
Full => true,
WordChar => false,
NotWordChar => true,
NewLine => true,
Boundary => true,
Empty => false,
}
}
pub fn is_full(&self) -> bool {
match *self {
Look::Full => true,
_ => false,
}
}
pub fn as_usize(&self) -> usize {
use self::Look::*;
match *self {
Full => 0,
WordChar => 1,
NotWordChar => 2,
NewLine => 3,
Boundary => 4,
Empty => 5,
}
}
pub fn num() -> usize { 6 }
pub fn all() -> &'static [Look] {
&ALL
}
}
#[cfg(test)]
mod tests {
use quickcheck::{Arbitrary, Gen, quickcheck};
use super::*;
impl Arbitrary for Look {
fn arbitrary<G: Gen>(g: &mut G) -> Look {
use look::Look::*;
*g.choose(&[Full, WordChar, NotWordChar, NewLine, Boundary, Empty]).unwrap()
}
}
#[test]
fn intersection_commutes() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b) == b.intersection(&a)
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_ordering() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b) <= a
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_eoi() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b).allows_eoi() == (a.allows_eoi() && b.allows_eoi())
}
quickcheck(prop as fn(_, _) -> _);
}
#[test]
fn intersection_set() {
fn prop(a: Look, b: Look) -> bool {
a.intersection(&b).as_set() == &a.as_set().intersection(b.as_set())
}
quickcheck(prop as fn(_, _) -> _);
}
} | } else if self.intersection(other) == *self {
Some(Ordering::Less)
} else if self.intersection(other) == *other {
Some(Ordering::Greater)
} else { | random_line_split |
net.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[crate_id = "github.com/mozilla/servo#net:0.1"];
#[crate_type = "lib"];
#[feature(globs, managed_boxes)];
| extern crate http;
extern crate servo_util = "util";
extern crate stb_image;
extern crate extra;
extern crate png;
extern crate serialize;
extern crate sync;
/// Image handling.
///
/// It may be surprising that this goes in the network crate as opposed to the graphics crate.
/// However, image handling is generally very integrated with the network stack (especially where
/// caching is involved) and as a result it must live in here.
pub mod image {
pub mod base;
pub mod holder;
}
pub mod file_loader;
pub mod http_loader;
pub mod data_loader;
pub mod image_cache_task;
pub mod local_image_cache;
pub mod resource_task; | extern crate collections;
extern crate geom; | random_line_split |
borrowck-closures-two-mut.rs | // Tests that two closures cannot simultaneously have mutable
// access to the variable, whether that mutable access be used
// for direct assignment or for taking mutable ref. Issue #6801.
fn to_fn_mut<F: FnMut()>(f: F) -> F { f }
fn a() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 4);
let c2 = to_fn_mut(|| x = 5); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn set(x: &mut isize) {
*x = 4;
}
fn b() {
let mut x = 3;
let c1 = to_fn_mut(|| set(&mut x));
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn c() {
let mut x = 3; | drop((c1, c2));
}
fn d() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| { let _y = to_fn_mut(|| set(&mut x)); }); // (nested closure)
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn g() {
struct Foo {
f: Box<isize>
}
let mut x: Box<_> = Box::new(Foo { f: Box::new(3) });
let c1 = to_fn_mut(|| set(&mut *x.f));
let c2 = to_fn_mut(|| set(&mut *x.f));
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn main() {
} | let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once | random_line_split |
borrowck-closures-two-mut.rs | // Tests that two closures cannot simultaneously have mutable
// access to the variable, whether that mutable access be used
// for direct assignment or for taking mutable ref. Issue #6801.
fn to_fn_mut<F: FnMut()>(f: F) -> F { f }
fn a() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 4);
let c2 = to_fn_mut(|| x = 5); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn set(x: &mut isize) {
*x = 4;
}
fn b() {
let mut x = 3;
let c1 = to_fn_mut(|| set(&mut x));
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn c() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn | () {
let mut x = 3;
let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| { let _y = to_fn_mut(|| set(&mut x)); }); // (nested closure)
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn g() {
struct Foo {
f: Box<isize>
}
let mut x: Box<_> = Box::new(Foo { f: Box::new(3) });
let c1 = to_fn_mut(|| set(&mut *x.f));
let c2 = to_fn_mut(|| set(&mut *x.f));
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn main() {
}
| d | identifier_name |
borrowck-closures-two-mut.rs | // Tests that two closures cannot simultaneously have mutable
// access to the variable, whether that mutable access be used
// for direct assignment or for taking mutable ref. Issue #6801.
fn to_fn_mut<F: FnMut()>(f: F) -> F |
fn a() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 4);
let c2 = to_fn_mut(|| x = 5); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn set(x: &mut isize) {
*x = 4;
}
fn b() {
let mut x = 3;
let c1 = to_fn_mut(|| set(&mut x));
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn c() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| set(&mut x)); //~ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn d() {
let mut x = 3;
let c1 = to_fn_mut(|| x = 5);
let c2 = to_fn_mut(|| { let _y = to_fn_mut(|| set(&mut x)); }); // (nested closure)
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn g() {
struct Foo {
f: Box<isize>
}
let mut x: Box<_> = Box::new(Foo { f: Box::new(3) });
let c1 = to_fn_mut(|| set(&mut *x.f));
let c2 = to_fn_mut(|| set(&mut *x.f));
//~^ ERROR cannot borrow `x` as mutable more than once
drop((c1, c2));
}
fn main() {
}
| { f } | identifier_body |
pipe.rs | use alloc::arc::{Arc, Weak};
use collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use sync::WaitCondition;
use syscall::error::{Error, Result, EBADF, EPIPE};
use syscall::flag::O_NONBLOCK;
use syscall::scheme::Scheme;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPES: Once<RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)>> = Once::new();
/// Initialize pipes, called if needed
fn init_pipes() -> RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
RwLock::new((BTreeMap::new(), BTreeMap::new()))
}
/// Get the global pipes list, const
fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).read()
}
/// Get the global schemes list, mutable
fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).write()
}
pub fn pipe(flags: usize) -> (usize, usize) {
let mut pipes = pipes_mut();
let read_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let read = PipeRead::new(flags);
let write = PipeWrite::new(&read);
pipes.0.insert(read_id, read);
pipes.1.insert(write_id, write);
(read_id, write_id)
}
pub struct PipeScheme;
impl Scheme for PipeScheme {
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let mut pipes = pipes_mut();
let read_option = pipes.0.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = read_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.0.insert(pipe_id, pipe);
return Ok(pipe_id);
}
let write_option = pipes.1.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = write_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.1.insert(pipe_id, pipe);
return Ok(pipe_id);
}
Err(Error::new(EBADF))
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.0.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.read(buf)
} else {
Err(Error::new(EBADF))
}
}
fn | (&self, id: usize, buf: &[u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.1.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.write(buf)
} else {
Err(Error::new(EBADF))
}
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
let mut pipes = pipes_mut();
drop(pipes.0.remove(&id));
drop(pipes.1.remove(&id));
Ok(0)
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeRead {
flags: usize,
condition: Arc<WaitCondition>,
vec: Arc<Mutex<VecDeque<u8>>>
}
impl PipeRead {
pub fn new(flags: usize) -> Self {
PipeRead {
flags: flags,
condition: Arc::new(WaitCondition::new()),
vec: Arc::new(Mutex::new(VecDeque::new())),
}
}
fn read(&self, buf: &mut [u8]) -> Result<usize> {
loop {
{
let mut vec = self.vec.lock();
let mut i = 0;
while i < buf.len() {
if let Some(b) = vec.pop_front() {
buf[i] = b;
i += 1;
} else {
break;
}
}
if i > 0 {
return Ok(i);
}
}
if self.flags & O_NONBLOCK == O_NONBLOCK || Arc::weak_count(&self.vec) == 0 {
return Ok(0);
} else {
self.condition.wait();
}
}
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeWrite {
condition: Arc<WaitCondition>,
vec: Weak<Mutex<VecDeque<u8>>>
}
impl PipeWrite {
pub fn new(read: &PipeRead) -> Self {
PipeWrite {
condition: read.condition.clone(),
vec: Arc::downgrade(&read.vec),
}
}
fn write(&self, buf: &[u8]) -> Result<usize> {
if let Some(vec_lock) = self.vec.upgrade() {
let mut vec = vec_lock.lock();
for &b in buf.iter() {
vec.push_back(b);
}
self.condition.notify();
Ok(buf.len())
} else {
Err(Error::new(EPIPE))
}
}
}
impl Drop for PipeWrite {
fn drop(&mut self) {
self.condition.notify();
}
}
| write | identifier_name |
pipe.rs | use alloc::arc::{Arc, Weak};
use collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use sync::WaitCondition;
use syscall::error::{Error, Result, EBADF, EPIPE};
use syscall::flag::O_NONBLOCK;
use syscall::scheme::Scheme;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPES: Once<RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)>> = Once::new();
/// Initialize pipes, called if needed
fn init_pipes() -> RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
RwLock::new((BTreeMap::new(), BTreeMap::new()))
}
/// Get the global pipes list, const
fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).read()
}
/// Get the global schemes list, mutable
fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).write()
}
pub fn pipe(flags: usize) -> (usize, usize) {
let mut pipes = pipes_mut();
let read_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let read = PipeRead::new(flags);
let write = PipeWrite::new(&read);
pipes.0.insert(read_id, read);
pipes.1.insert(write_id, write);
(read_id, write_id)
}
pub struct PipeScheme;
impl Scheme for PipeScheme {
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let mut pipes = pipes_mut();
let read_option = pipes.0.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = read_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.0.insert(pipe_id, pipe);
return Ok(pipe_id);
}
let write_option = pipes.1.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = write_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.1.insert(pipe_id, pipe);
return Ok(pipe_id);
}
Err(Error::new(EBADF))
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.0.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.read(buf)
} else {
Err(Error::new(EBADF))
}
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.1.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.write(buf)
} else {
Err(Error::new(EBADF))
}
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
let mut pipes = pipes_mut();
drop(pipes.0.remove(&id));
drop(pipes.1.remove(&id));
Ok(0)
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeRead {
flags: usize,
condition: Arc<WaitCondition>,
vec: Arc<Mutex<VecDeque<u8>>>
}
impl PipeRead {
pub fn new(flags: usize) -> Self {
PipeRead {
flags: flags,
condition: Arc::new(WaitCondition::new()), | vec: Arc::new(Mutex::new(VecDeque::new())),
}
}
fn read(&self, buf: &mut [u8]) -> Result<usize> {
loop {
{
let mut vec = self.vec.lock();
let mut i = 0;
while i < buf.len() {
if let Some(b) = vec.pop_front() {
buf[i] = b;
i += 1;
} else {
break;
}
}
if i > 0 {
return Ok(i);
}
}
if self.flags & O_NONBLOCK == O_NONBLOCK || Arc::weak_count(&self.vec) == 0 {
return Ok(0);
} else {
self.condition.wait();
}
}
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeWrite {
condition: Arc<WaitCondition>,
vec: Weak<Mutex<VecDeque<u8>>>
}
impl PipeWrite {
pub fn new(read: &PipeRead) -> Self {
PipeWrite {
condition: read.condition.clone(),
vec: Arc::downgrade(&read.vec),
}
}
fn write(&self, buf: &[u8]) -> Result<usize> {
if let Some(vec_lock) = self.vec.upgrade() {
let mut vec = vec_lock.lock();
for &b in buf.iter() {
vec.push_back(b);
}
self.condition.notify();
Ok(buf.len())
} else {
Err(Error::new(EPIPE))
}
}
}
impl Drop for PipeWrite {
fn drop(&mut self) {
self.condition.notify();
}
} | random_line_split |
|
pipe.rs | use alloc::arc::{Arc, Weak};
use collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use sync::WaitCondition;
use syscall::error::{Error, Result, EBADF, EPIPE};
use syscall::flag::O_NONBLOCK;
use syscall::scheme::Scheme;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPES: Once<RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)>> = Once::new();
/// Initialize pipes, called if needed
fn init_pipes() -> RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
RwLock::new((BTreeMap::new(), BTreeMap::new()))
}
/// Get the global pipes list, const
fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).read()
}
/// Get the global schemes list, mutable
fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).write()
}
pub fn pipe(flags: usize) -> (usize, usize) {
let mut pipes = pipes_mut();
let read_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let read = PipeRead::new(flags);
let write = PipeWrite::new(&read);
pipes.0.insert(read_id, read);
pipes.1.insert(write_id, write);
(read_id, write_id)
}
pub struct PipeScheme;
impl Scheme for PipeScheme {
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let mut pipes = pipes_mut();
let read_option = pipes.0.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = read_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.0.insert(pipe_id, pipe);
return Ok(pipe_id);
}
let write_option = pipes.1.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = write_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.1.insert(pipe_id, pipe);
return Ok(pipe_id);
}
Err(Error::new(EBADF))
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.0.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.read(buf)
} else {
Err(Error::new(EBADF))
}
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.1.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.write(buf)
} else {
Err(Error::new(EBADF))
}
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
let mut pipes = pipes_mut();
drop(pipes.0.remove(&id));
drop(pipes.1.remove(&id));
Ok(0)
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeRead {
flags: usize,
condition: Arc<WaitCondition>,
vec: Arc<Mutex<VecDeque<u8>>>
}
impl PipeRead {
pub fn new(flags: usize) -> Self {
PipeRead {
flags: flags,
condition: Arc::new(WaitCondition::new()),
vec: Arc::new(Mutex::new(VecDeque::new())),
}
}
fn read(&self, buf: &mut [u8]) -> Result<usize> | if self.flags & O_NONBLOCK == O_NONBLOCK || Arc::weak_count(&self.vec) == 0 {
return Ok(0);
} else {
self.condition.wait();
}
}
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeWrite {
condition: Arc<WaitCondition>,
vec: Weak<Mutex<VecDeque<u8>>>
}
impl PipeWrite {
pub fn new(read: &PipeRead) -> Self {
PipeWrite {
condition: read.condition.clone(),
vec: Arc::downgrade(&read.vec),
}
}
fn write(&self, buf: &[u8]) -> Result<usize> {
if let Some(vec_lock) = self.vec.upgrade() {
let mut vec = vec_lock.lock();
for &b in buf.iter() {
vec.push_back(b);
}
self.condition.notify();
Ok(buf.len())
} else {
Err(Error::new(EPIPE))
}
}
}
impl Drop for PipeWrite {
fn drop(&mut self) {
self.condition.notify();
}
}
| {
loop {
{
let mut vec = self.vec.lock();
let mut i = 0;
while i < buf.len() {
if let Some(b) = vec.pop_front() {
buf[i] = b;
i += 1;
} else {
break;
}
}
if i > 0 {
return Ok(i);
}
}
| identifier_body |
pipe.rs | use alloc::arc::{Arc, Weak};
use collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use sync::WaitCondition;
use syscall::error::{Error, Result, EBADF, EPIPE};
use syscall::flag::O_NONBLOCK;
use syscall::scheme::Scheme;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPES: Once<RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)>> = Once::new();
/// Initialize pipes, called if needed
fn init_pipes() -> RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
RwLock::new((BTreeMap::new(), BTreeMap::new()))
}
/// Get the global pipes list, const
fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).read()
}
/// Get the global schemes list, mutable
fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
PIPES.call_once(init_pipes).write()
}
pub fn pipe(flags: usize) -> (usize, usize) {
let mut pipes = pipes_mut();
let read_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let read = PipeRead::new(flags);
let write = PipeWrite::new(&read);
pipes.0.insert(read_id, read);
pipes.1.insert(write_id, write);
(read_id, write_id)
}
pub struct PipeScheme;
impl Scheme for PipeScheme {
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let mut pipes = pipes_mut();
let read_option = pipes.0.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = read_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.0.insert(pipe_id, pipe);
return Ok(pipe_id);
}
let write_option = pipes.1.get(&id).map(|pipe| pipe.clone());
if let Some(pipe) = write_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.1.insert(pipe_id, pipe);
return Ok(pipe_id);
}
Err(Error::new(EBADF))
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.0.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.read(buf)
} else {
Err(Error::new(EBADF))
}
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
let pipe_option = {
let pipes = pipes();
pipes.1.get(&id).map(|pipe| pipe.clone())
};
if let Some(pipe) = pipe_option {
pipe.write(buf)
} else {
Err(Error::new(EBADF))
}
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
let mut pipes = pipes_mut();
drop(pipes.0.remove(&id));
drop(pipes.1.remove(&id));
Ok(0)
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeRead {
flags: usize,
condition: Arc<WaitCondition>,
vec: Arc<Mutex<VecDeque<u8>>>
}
impl PipeRead {
pub fn new(flags: usize) -> Self {
PipeRead {
flags: flags,
condition: Arc::new(WaitCondition::new()),
vec: Arc::new(Mutex::new(VecDeque::new())),
}
}
fn read(&self, buf: &mut [u8]) -> Result<usize> {
loop {
{
let mut vec = self.vec.lock();
let mut i = 0;
while i < buf.len() {
if let Some(b) = vec.pop_front() {
buf[i] = b;
i += 1;
} else {
break;
}
}
if i > 0 {
return Ok(i);
}
}
if self.flags & O_NONBLOCK == O_NONBLOCK || Arc::weak_count(&self.vec) == 0 {
return Ok(0);
} else {
self.condition.wait();
}
}
}
}
/// Read side of a pipe
#[derive(Clone)]
pub struct PipeWrite {
condition: Arc<WaitCondition>,
vec: Weak<Mutex<VecDeque<u8>>>
}
impl PipeWrite {
pub fn new(read: &PipeRead) -> Self {
PipeWrite {
condition: read.condition.clone(),
vec: Arc::downgrade(&read.vec),
}
}
fn write(&self, buf: &[u8]) -> Result<usize> {
if let Some(vec_lock) = self.vec.upgrade() | else {
Err(Error::new(EPIPE))
}
}
}
impl Drop for PipeWrite {
fn drop(&mut self) {
self.condition.notify();
}
}
| {
let mut vec = vec_lock.lock();
for &b in buf.iter() {
vec.push_back(b);
}
self.condition.notify();
Ok(buf.len())
} | conditional_block |
mock_alarm.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub struct MockAlarm {
current_time: core::cell::Cell<kernel::hil::time::Ticks32>,
setpoint: core::cell::Cell<Option<kernel::hil::time::Ticks32>>,
}
impl MockAlarm {
pub fn new() -> MockAlarm {
MockAlarm {
current_time: core::cell::Cell::new(0.into()),
setpoint: core::cell::Cell::new(Some(0.into())),
}
}
pub fn set_time(&self, new_time: kernel::hil::time::Ticks32) { self.current_time.set(new_time); }
}
impl kernel::hil::time::Time for MockAlarm {
type Frequency = h1::timels::Freq256Khz;
type Ticks = kernel::hil::time::Ticks32;
fn now(&self) -> Self::Ticks { self.current_time.get() } | use kernel::hil::time::Ticks;
self.setpoint.set(Some(reference.wrapping_add(dt)));
}
fn get_alarm(&self) -> Self::Ticks { self.setpoint.get().unwrap_or(0.into()) }
// Ignored -- the test should manually trigger the client.
fn set_alarm_client(&'a self, _client: &'a dyn kernel::hil::time::AlarmClient) {}
fn is_armed(&self) -> bool { self.setpoint.get().is_some() }
fn disarm(&self) -> kernel::ReturnCode {
self.setpoint.set(None);
kernel::ReturnCode::SUCCESS
}
fn minimum_dt(&self) -> Self::Ticks { 1.into() }
} | }
impl<'a> kernel::hil::time::Alarm<'a> for MockAlarm {
fn set_alarm(&self, reference: Self::Ticks, dt: Self::Ticks) { | random_line_split |
mock_alarm.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub struct MockAlarm {
current_time: core::cell::Cell<kernel::hil::time::Ticks32>,
setpoint: core::cell::Cell<Option<kernel::hil::time::Ticks32>>,
}
impl MockAlarm {
pub fn new() -> MockAlarm {
MockAlarm {
current_time: core::cell::Cell::new(0.into()),
setpoint: core::cell::Cell::new(Some(0.into())),
}
}
pub fn set_time(&self, new_time: kernel::hil::time::Ticks32) { self.current_time.set(new_time); }
}
impl kernel::hil::time::Time for MockAlarm {
type Frequency = h1::timels::Freq256Khz;
type Ticks = kernel::hil::time::Ticks32;
fn now(&self) -> Self::Ticks { self.current_time.get() }
}
impl<'a> kernel::hil::time::Alarm<'a> for MockAlarm {
fn set_alarm(&self, reference: Self::Ticks, dt: Self::Ticks) {
use kernel::hil::time::Ticks;
self.setpoint.set(Some(reference.wrapping_add(dt)));
}
fn get_alarm(&self) -> Self::Ticks { self.setpoint.get().unwrap_or(0.into()) }
// Ignored -- the test should manually trigger the client.
fn set_alarm_client(&'a self, _client: &'a dyn kernel::hil::time::AlarmClient) {}
fn is_armed(&self) -> bool |
fn disarm(&self) -> kernel::ReturnCode {
self.setpoint.set(None);
kernel::ReturnCode::SUCCESS
}
fn minimum_dt(&self) -> Self::Ticks { 1.into() }
}
| { self.setpoint.get().is_some() } | identifier_body |
mock_alarm.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub struct | {
current_time: core::cell::Cell<kernel::hil::time::Ticks32>,
setpoint: core::cell::Cell<Option<kernel::hil::time::Ticks32>>,
}
impl MockAlarm {
pub fn new() -> MockAlarm {
MockAlarm {
current_time: core::cell::Cell::new(0.into()),
setpoint: core::cell::Cell::new(Some(0.into())),
}
}
pub fn set_time(&self, new_time: kernel::hil::time::Ticks32) { self.current_time.set(new_time); }
}
impl kernel::hil::time::Time for MockAlarm {
type Frequency = h1::timels::Freq256Khz;
type Ticks = kernel::hil::time::Ticks32;
fn now(&self) -> Self::Ticks { self.current_time.get() }
}
impl<'a> kernel::hil::time::Alarm<'a> for MockAlarm {
fn set_alarm(&self, reference: Self::Ticks, dt: Self::Ticks) {
use kernel::hil::time::Ticks;
self.setpoint.set(Some(reference.wrapping_add(dt)));
}
fn get_alarm(&self) -> Self::Ticks { self.setpoint.get().unwrap_or(0.into()) }
// Ignored -- the test should manually trigger the client.
fn set_alarm_client(&'a self, _client: &'a dyn kernel::hil::time::AlarmClient) {}
fn is_armed(&self) -> bool { self.setpoint.get().is_some() }
fn disarm(&self) -> kernel::ReturnCode {
self.setpoint.set(None);
kernel::ReturnCode::SUCCESS
}
fn minimum_dt(&self) -> Self::Ticks { 1.into() }
}
| MockAlarm | identifier_name |
color.rs | use na::Vector3;
use std::fmt;
use std::ops::{Mul, Add, Div};
use std::cmp::Ordering;
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Color {
pub rgb: Vector3<f64>
}
impl Color {
pub fn new(r:f64, g:f64, b:f64) -> Color {
return Color {
rgb: Vector3::new(r,g,b)
}
}
pub fn black() -> Color {
Color::new(0f64,0f64,0f64)
}
pub fn white() -> Color {
return Color::new(1f64,1f64,1f64);
}
pub fn red() -> Color {
return Color::new(1f64,0f64,0f64);
}
pub fn blue() -> Color {
return Color::new(0f64,0f64,01f64);
}
pub fn green() -> Color {
return Color::new(0f64,1f64,0f64);
}
pub fn to_u8(&self) -> (u8, u8, u8) {
return ((self.rgb[0] * 255f64).min(255f64) as u8, (self.rgb[1] * 255f64).min(255f64) as u8, (self.rgb[2] * 255f64).min(255f64) as u8);
}
pub fn to_vec(&self) -> Vector3<f64> {
return self.rgb.clone();
}
pub fn clamp(&self, val: f64) -> Color {
return Color::new(self.rgb.x.min(val), self.rgb.y.min(val), self.rgb.z.min(val));
}
pub fn min() -> Color {
return Color::new(1./255.,1./255.,1./255.);
}
pub fn ignore_nan(&self) -> Color {
return Color::new(
if self.rgb.x.is_nan() { 0. } else { self.rgb.x },
if self.rgb.y.is_nan() | else { self.rgb.y },
if self.rgb.z.is_nan() { 0. } else { self.rgb.z },
);
}
}
impl fmt::Display for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{:0>2x}{:0>2x}{:0>2x}", (self.rgb.x * 255f64) as u8, (self.rgb.y * 255f64) as u8, (self.rgb.z * 255f64) as u8)
}
}
impl Mul<Vector3<f64>> for Color {
type Output = Color;
fn mul(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs.component_mul(&self.rgb)
}
}
}
impl Mul<Color> for Color {
type Output = Color;
fn mul(self, _rhs: Color) -> Color {
Color {rgb: (_rhs * self.to_vec()).to_vec() }
}
}
impl Mul<f64> for Color {
type Output = Color;
fn mul(self, _rhs: f64) -> Color {
Color {rgb: self.rgb * _rhs }
}
}
impl Add<Color> for Color {
type Output = Color;
fn add(self, _rhs: Color) -> Color {
Color {rgb: self.rgb + _rhs.rgb }
}
}
impl Add<Vector3<f64>> for Color {
type Output = Color;
fn add(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs + &self.rgb
}
}
}
impl Div<f64> for Color {
type Output = Color;
fn div(self, _rhs: f64) -> Color {
Color {rgb: self.rgb / _rhs }
}
}
| { 0. } | conditional_block |
color.rs | use na::Vector3;
use std::fmt;
use std::ops::{Mul, Add, Div};
use std::cmp::Ordering;
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Color {
pub rgb: Vector3<f64>
}
impl Color {
pub fn new(r:f64, g:f64, b:f64) -> Color {
return Color {
rgb: Vector3::new(r,g,b)
}
}
pub fn black() -> Color {
Color::new(0f64,0f64,0f64)
}
pub fn white() -> Color {
return Color::new(1f64,1f64,1f64);
}
pub fn red() -> Color {
return Color::new(1f64,0f64,0f64);
}
pub fn blue() -> Color {
return Color::new(0f64,0f64,01f64);
}
pub fn green() -> Color {
return Color::new(0f64,1f64,0f64);
}
pub fn to_u8(&self) -> (u8, u8, u8) {
return ((self.rgb[0] * 255f64).min(255f64) as u8, (self.rgb[1] * 255f64).min(255f64) as u8, (self.rgb[2] * 255f64).min(255f64) as u8);
}
pub fn to_vec(&self) -> Vector3<f64> {
return self.rgb.clone();
}
pub fn clamp(&self, val: f64) -> Color {
return Color::new(self.rgb.x.min(val), self.rgb.y.min(val), self.rgb.z.min(val));
}
pub fn min() -> Color {
return Color::new(1./255.,1./255.,1./255.);
}
pub fn ignore_nan(&self) -> Color {
return Color::new(
if self.rgb.x.is_nan() { 0. } else { self.rgb.x },
if self.rgb.y.is_nan() { 0. } else { self.rgb.y },
if self.rgb.z.is_nan() { 0. } else { self.rgb.z },
);
}
}
impl fmt::Display for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{:0>2x}{:0>2x}{:0>2x}", (self.rgb.x * 255f64) as u8, (self.rgb.y * 255f64) as u8, (self.rgb.z * 255f64) as u8)
}
}
impl Mul<Vector3<f64>> for Color {
type Output = Color;
fn mul(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs.component_mul(&self.rgb)
}
}
}
impl Mul<Color> for Color {
type Output = Color;
fn mul(self, _rhs: Color) -> Color {
Color {rgb: (_rhs * self.to_vec()).to_vec() }
}
}
impl Mul<f64> for Color {
type Output = Color;
fn | (self, _rhs: f64) -> Color {
Color {rgb: self.rgb * _rhs }
}
}
impl Add<Color> for Color {
type Output = Color;
fn add(self, _rhs: Color) -> Color {
Color {rgb: self.rgb + _rhs.rgb }
}
}
impl Add<Vector3<f64>> for Color {
type Output = Color;
fn add(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs + &self.rgb
}
}
}
impl Div<f64> for Color {
type Output = Color;
fn div(self, _rhs: f64) -> Color {
Color {rgb: self.rgb / _rhs }
}
}
| mul | identifier_name |
color.rs | use na::Vector3;
use std::fmt;
use std::ops::{Mul, Add, Div};
use std::cmp::Ordering;
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Color {
pub rgb: Vector3<f64>
}
impl Color {
pub fn new(r:f64, g:f64, b:f64) -> Color {
return Color {
rgb: Vector3::new(r,g,b)
}
}
pub fn black() -> Color {
Color::new(0f64,0f64,0f64)
}
pub fn white() -> Color {
return Color::new(1f64,1f64,1f64);
}
pub fn red() -> Color {
return Color::new(1f64,0f64,0f64);
}
pub fn blue() -> Color {
return Color::new(0f64,0f64,01f64);
}
pub fn green() -> Color {
return Color::new(0f64,1f64,0f64);
}
pub fn to_u8(&self) -> (u8, u8, u8) {
return ((self.rgb[0] * 255f64).min(255f64) as u8, (self.rgb[1] * 255f64).min(255f64) as u8, (self.rgb[2] * 255f64).min(255f64) as u8);
}
pub fn to_vec(&self) -> Vector3<f64> {
return self.rgb.clone();
}
pub fn clamp(&self, val: f64) -> Color {
return Color::new(self.rgb.x.min(val), self.rgb.y.min(val), self.rgb.z.min(val));
}
pub fn min() -> Color {
return Color::new(1./255.,1./255.,1./255.);
}
pub fn ignore_nan(&self) -> Color {
return Color::new(
if self.rgb.x.is_nan() { 0. } else { self.rgb.x },
if self.rgb.y.is_nan() { 0. } else { self.rgb.y },
if self.rgb.z.is_nan() { 0. } else { self.rgb.z }, | impl fmt::Display for Color {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{:0>2x}{:0>2x}{:0>2x}", (self.rgb.x * 255f64) as u8, (self.rgb.y * 255f64) as u8, (self.rgb.z * 255f64) as u8)
}
}
impl Mul<Vector3<f64>> for Color {
type Output = Color;
fn mul(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs.component_mul(&self.rgb)
}
}
}
impl Mul<Color> for Color {
type Output = Color;
fn mul(self, _rhs: Color) -> Color {
Color {rgb: (_rhs * self.to_vec()).to_vec() }
}
}
impl Mul<f64> for Color {
type Output = Color;
fn mul(self, _rhs: f64) -> Color {
Color {rgb: self.rgb * _rhs }
}
}
impl Add<Color> for Color {
type Output = Color;
fn add(self, _rhs: Color) -> Color {
Color {rgb: self.rgb + _rhs.rgb }
}
}
impl Add<Vector3<f64>> for Color {
type Output = Color;
fn add(self, _rhs: Vector3<f64>) -> Color {
Color {
rgb: _rhs + &self.rgb
}
}
}
impl Div<f64> for Color {
type Output = Color;
fn div(self, _rhs: f64) -> Color {
Color {rgb: self.rgb / _rhs }
}
} | );
}
}
| random_line_split |
guard.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else |
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
| {
None
} | conditional_block |
guard.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition. | }
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
} | pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T, | random_line_split |
guard.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct | <T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
| Guard | identifier_name |
mod.rs | //! If an extern token is provided, then this pass validates that
//! terminal IDs have conversions. Otherwise, it generates a
//! tokenizer. This can only be done after macro expansion because
//! some macro arguments never make it into an actual production and
//! are only used in `if` conditions; we use string literals for
//! those, but they do not have to have a defined conversion.
use super::{NormResult, NormError};
use intern::{self, intern};
use lexer::re;
use lexer::dfa::{self, DFAConstructionError, Precedence};
use lexer::nfa::NFAConstructionError::*;
use grammar::consts::*;
use grammar::parse_tree::*;
use collections::Set;
use collections::{map, Map};
#[cfg(test)]
mod test;
pub fn validate(mut grammar: Grammar) -> NormResult<Grammar> {
let (has_enum_token, all_literals) = {
let opt_enum_token = grammar.enum_token();
let conversions = opt_enum_token.map(|et| {
et.conversions.iter()
.map(|conversion| conversion.from)
.collect()
});
let mut validator = Validator {
grammar: &grammar,
all_literals: map(),
conversions: conversions,
};
try!(validator.validate());
(opt_enum_token.is_some(), validator.all_literals)
};
if!has_enum_token {
try!(construct(&mut grammar, all_literals));
}
Ok(grammar)
}
///////////////////////////////////////////////////////////////////////////
// Validation phase -- this phase walks the grammar and visits all
// terminals. If using an external set of tokens, it checks that all
// terminals have a defined conversion to some pattern. Otherwise,
// it collects all terminals into the `all_literals` set for later use.
struct Validator<'grammar> {
grammar: &'grammar Grammar,
all_literals: Map<TerminalLiteral, Span>,
conversions: Option<Set<TerminalString>>,
}
impl<'grammar> Validator<'grammar> {
fn validate(&mut self) -> NormResult<()> {
for item in &self.grammar.items {
match *item {
GrammarItem::Use(..) => { }
GrammarItem::ExternToken(_) => { }
GrammarItem::InternToken(_) => { }
GrammarItem::Nonterminal(ref data) => |
}
}
Ok(())
}
fn validate_alternative(&mut self, alternative: &Alternative) -> NormResult<()> {
assert!(alternative.condition.is_none()); // macro expansion should have removed these
try!(self.validate_expr(&alternative.expr));
Ok(())
}
fn validate_expr(&mut self, expr: &ExprSymbol) -> NormResult<()> {
for symbol in &expr.symbols {
try!(self.validate_symbol(symbol));
}
Ok(())
}
fn validate_symbol(&mut self, symbol: &Symbol) -> NormResult<()> {
match symbol.kind {
SymbolKind::Expr(ref expr) => {
try!(self.validate_expr(expr));
}
SymbolKind::Terminal(term) => {
try!(self.validate_terminal(symbol.span, term));
}
SymbolKind::Nonterminal(_) => {
}
SymbolKind::Repeat(ref repeat) => {
try!(self.validate_symbol(&repeat.symbol));
}
SymbolKind::Choose(ref sym) | SymbolKind::Name(_, ref sym) => {
try!(self.validate_symbol(sym));
}
SymbolKind::Lookahead | SymbolKind::Lookbehind => {
}
SymbolKind::AmbiguousId(id) => {
panic!("ambiguous id `{}` encountered after name resolution", id)
}
SymbolKind::Macro(..) => {
panic!("macro not removed: {:?}", symbol);
}
}
Ok(())
}
fn validate_terminal(&mut self, span: Span, term: TerminalString) -> NormResult<()> {
match self.conversions {
// If there is an extern token definition, validate that
// this terminal has a defined conversion.
Some(ref c) => {
if!c.contains(&term) {
return_err!(span, "terminal `{}` does not have a pattern defined for it",
term);
}
}
// If there is no extern token definition, then collect
// the terminal literals ("class", r"[a-z]+") into a set.
None => match term {
TerminalString::Bare(c) => {
// Bare identifiers like `x` can never be resolved
// as terminals unless there is a conversion
// defined for them that indicates they are a
// terminal; otherwise it's just an unresolved
// identifier.
panic!("bare literal `{}` without extern token definition", c);
}
TerminalString::Literal(l) => {
self.all_literals.entry(l).or_insert(span);
}
}
}
Ok(())
}
}
///////////////////////////////////////////////////////////////////////////
// Construction phase -- if we are constructing a tokenizer, this
// phase builds up an internal token DFA.
pub fn construct(grammar: &mut Grammar, literals_map: Map<TerminalLiteral, Span>) -> NormResult<()> {
let literals: Vec<TerminalLiteral> =
literals_map.keys()
.cloned()
.collect();
// Build up two vectors, one of parsed regular expressions and
// one of precedences, that are parallel with `literals`.
let mut regexs = Vec::with_capacity(literals.len());
let mut precedences = Vec::with_capacity(literals.len());
try!(intern::read(|interner| {
for &literal in &literals {
match literal {
TerminalLiteral::Quoted(s) => {
precedences.push(Precedence(1));
regexs.push(re::parse_literal(interner.data(s)));
}
TerminalLiteral::Regex(s) => {
precedences.push(Precedence(0));
match re::parse_regex(interner.data(s)) {
Ok(regex) => regexs.push(regex),
Err(error) => {
let literal_span = literals_map[&literal];
// FIXME -- take offset into account for
// span; this requires knowing how many #
// the user used, which we do not track
return_err!(
literal_span,
"invalid regular expression: {}",
error);
}
}
}
}
}
Ok(())
}));
let dfa = match dfa::build_dfa(®exs, &precedences) {
Ok(dfa) => dfa,
Err(DFAConstructionError::NFAConstructionError { index, error }) => {
let feature = match error {
NamedCaptures => r#"named captures (`(?P<foo>...)`)"#,
NonGreedy => r#""non-greedy" repetitions (`*?` or `+?`)"#,
WordBoundary => r#"word boundaries (`\b` or `\B`)"#,
LineBoundary => r#"line boundaries (`^` or `$`)"#,
TextBoundary => r#"text boundaries (`^` or `$`)"#,
};
let literal = literals[index.index()];
let span = literals_map[&literal];
return_err!(
span,
"{} are not supported in regular expressions",
feature)
}
Err(DFAConstructionError::Ambiguity { match0, match1 }) => {
let literal0 = literals[match0.index()];
let literal1 = literals[match1.index()];
let span0 = literals_map[&literal0];
let _span1 = literals_map[&literal1];
// FIXME(#88) -- it'd be nice to give an example here
return_err!(
span0,
"ambiguity detected between the terminal `{}` and the terminal `{}`",
literal0, literal1);
}
};
grammar.items.push(GrammarItem::InternToken(InternToken {
literals: literals,
dfa: dfa
}));
// we need to inject a `'input` lifetime and `input: &'input str` parameter as well:
let input_lifetime = intern(INPUT_LIFETIME);
for parameter in &grammar.type_parameters {
match *parameter {
TypeParameter::Lifetime(i) if i == input_lifetime => {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `'input` lifetime is implicit and cannot be declared");
}
_ => { }
}
}
let input_parameter = intern(INPUT_PARAMETER);
for parameter in &grammar.parameters {
if parameter.name == input_parameter {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `input` parameter is implicit and cannot be declared");
}
}
grammar.type_parameters.insert(0, TypeParameter::Lifetime(input_lifetime));
let parameter = Parameter {
name: input_parameter,
ty: TypeRef::Ref {
lifetime: Some(input_lifetime),
mutable: false,
referent: Box::new(TypeRef::Id(intern("str")))
}
};
grammar.parameters.push(parameter);
Ok(())
}
| {
for alternative in &data.alternatives {
try!(self.validate_alternative(alternative));
}
} | conditional_block |
mod.rs | //! If an extern token is provided, then this pass validates that
//! terminal IDs have conversions. Otherwise, it generates a
//! tokenizer. This can only be done after macro expansion because
//! some macro arguments never make it into an actual production and
//! are only used in `if` conditions; we use string literals for
//! those, but they do not have to have a defined conversion.
use super::{NormResult, NormError};
use intern::{self, intern};
use lexer::re;
use lexer::dfa::{self, DFAConstructionError, Precedence};
use lexer::nfa::NFAConstructionError::*;
use grammar::consts::*;
use grammar::parse_tree::*;
use collections::Set;
use collections::{map, Map};
#[cfg(test)]
mod test;
pub fn validate(mut grammar: Grammar) -> NormResult<Grammar> {
let (has_enum_token, all_literals) = {
let opt_enum_token = grammar.enum_token();
let conversions = opt_enum_token.map(|et| {
et.conversions.iter()
.map(|conversion| conversion.from)
.collect()
});
let mut validator = Validator {
grammar: &grammar,
all_literals: map(),
conversions: conversions,
};
try!(validator.validate());
(opt_enum_token.is_some(), validator.all_literals)
};
if!has_enum_token {
try!(construct(&mut grammar, all_literals));
}
Ok(grammar)
}
///////////////////////////////////////////////////////////////////////////
// Validation phase -- this phase walks the grammar and visits all
// terminals. If using an external set of tokens, it checks that all
// terminals have a defined conversion to some pattern. Otherwise,
// it collects all terminals into the `all_literals` set for later use.
struct Validator<'grammar> {
grammar: &'grammar Grammar,
all_literals: Map<TerminalLiteral, Span>,
conversions: Option<Set<TerminalString>>,
}
impl<'grammar> Validator<'grammar> {
fn validate(&mut self) -> NormResult<()> |
fn validate_alternative(&mut self, alternative: &Alternative) -> NormResult<()> {
assert!(alternative.condition.is_none()); // macro expansion should have removed these
try!(self.validate_expr(&alternative.expr));
Ok(())
}
fn validate_expr(&mut self, expr: &ExprSymbol) -> NormResult<()> {
for symbol in &expr.symbols {
try!(self.validate_symbol(symbol));
}
Ok(())
}
fn validate_symbol(&mut self, symbol: &Symbol) -> NormResult<()> {
match symbol.kind {
SymbolKind::Expr(ref expr) => {
try!(self.validate_expr(expr));
}
SymbolKind::Terminal(term) => {
try!(self.validate_terminal(symbol.span, term));
}
SymbolKind::Nonterminal(_) => {
}
SymbolKind::Repeat(ref repeat) => {
try!(self.validate_symbol(&repeat.symbol));
}
SymbolKind::Choose(ref sym) | SymbolKind::Name(_, ref sym) => {
try!(self.validate_symbol(sym));
}
SymbolKind::Lookahead | SymbolKind::Lookbehind => {
}
SymbolKind::AmbiguousId(id) => {
panic!("ambiguous id `{}` encountered after name resolution", id)
}
SymbolKind::Macro(..) => {
panic!("macro not removed: {:?}", symbol);
}
}
Ok(())
}
fn validate_terminal(&mut self, span: Span, term: TerminalString) -> NormResult<()> {
match self.conversions {
// If there is an extern token definition, validate that
// this terminal has a defined conversion.
Some(ref c) => {
if!c.contains(&term) {
return_err!(span, "terminal `{}` does not have a pattern defined for it",
term);
}
}
// If there is no extern token definition, then collect
// the terminal literals ("class", r"[a-z]+") into a set.
None => match term {
TerminalString::Bare(c) => {
// Bare identifiers like `x` can never be resolved
// as terminals unless there is a conversion
// defined for them that indicates they are a
// terminal; otherwise it's just an unresolved
// identifier.
panic!("bare literal `{}` without extern token definition", c);
}
TerminalString::Literal(l) => {
self.all_literals.entry(l).or_insert(span);
}
}
}
Ok(())
}
}
///////////////////////////////////////////////////////////////////////////
// Construction phase -- if we are constructing a tokenizer, this
// phase builds up an internal token DFA.
pub fn construct(grammar: &mut Grammar, literals_map: Map<TerminalLiteral, Span>) -> NormResult<()> {
let literals: Vec<TerminalLiteral> =
literals_map.keys()
.cloned()
.collect();
// Build up two vectors, one of parsed regular expressions and
// one of precedences, that are parallel with `literals`.
let mut regexs = Vec::with_capacity(literals.len());
let mut precedences = Vec::with_capacity(literals.len());
try!(intern::read(|interner| {
for &literal in &literals {
match literal {
TerminalLiteral::Quoted(s) => {
precedences.push(Precedence(1));
regexs.push(re::parse_literal(interner.data(s)));
}
TerminalLiteral::Regex(s) => {
precedences.push(Precedence(0));
match re::parse_regex(interner.data(s)) {
Ok(regex) => regexs.push(regex),
Err(error) => {
let literal_span = literals_map[&literal];
// FIXME -- take offset into account for
// span; this requires knowing how many #
// the user used, which we do not track
return_err!(
literal_span,
"invalid regular expression: {}",
error);
}
}
}
}
}
Ok(())
}));
let dfa = match dfa::build_dfa(®exs, &precedences) {
Ok(dfa) => dfa,
Err(DFAConstructionError::NFAConstructionError { index, error }) => {
let feature = match error {
NamedCaptures => r#"named captures (`(?P<foo>...)`)"#,
NonGreedy => r#""non-greedy" repetitions (`*?` or `+?`)"#,
WordBoundary => r#"word boundaries (`\b` or `\B`)"#,
LineBoundary => r#"line boundaries (`^` or `$`)"#,
TextBoundary => r#"text boundaries (`^` or `$`)"#,
};
let literal = literals[index.index()];
let span = literals_map[&literal];
return_err!(
span,
"{} are not supported in regular expressions",
feature)
}
Err(DFAConstructionError::Ambiguity { match0, match1 }) => {
let literal0 = literals[match0.index()];
let literal1 = literals[match1.index()];
let span0 = literals_map[&literal0];
let _span1 = literals_map[&literal1];
// FIXME(#88) -- it'd be nice to give an example here
return_err!(
span0,
"ambiguity detected between the terminal `{}` and the terminal `{}`",
literal0, literal1);
}
};
grammar.items.push(GrammarItem::InternToken(InternToken {
literals: literals,
dfa: dfa
}));
// we need to inject a `'input` lifetime and `input: &'input str` parameter as well:
let input_lifetime = intern(INPUT_LIFETIME);
for parameter in &grammar.type_parameters {
match *parameter {
TypeParameter::Lifetime(i) if i == input_lifetime => {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `'input` lifetime is implicit and cannot be declared");
}
_ => { }
}
}
let input_parameter = intern(INPUT_PARAMETER);
for parameter in &grammar.parameters {
if parameter.name == input_parameter {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `input` parameter is implicit and cannot be declared");
}
}
grammar.type_parameters.insert(0, TypeParameter::Lifetime(input_lifetime));
let parameter = Parameter {
name: input_parameter,
ty: TypeRef::Ref {
lifetime: Some(input_lifetime),
mutable: false,
referent: Box::new(TypeRef::Id(intern("str")))
}
};
grammar.parameters.push(parameter);
Ok(())
}
| {
for item in &self.grammar.items {
match *item {
GrammarItem::Use(..) => { }
GrammarItem::ExternToken(_) => { }
GrammarItem::InternToken(_) => { }
GrammarItem::Nonterminal(ref data) => {
for alternative in &data.alternatives {
try!(self.validate_alternative(alternative));
}
}
}
}
Ok(())
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.