file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
vec_box_sized.rs | // run-rustfix
#![allow(dead_code)]
struct SizedStruct(i32);
struct UnsizedStruct([i32]);
struct BigStruct([i32; 10000]);
/// The following should trigger the lint
mod should_trigger {
use super::SizedStruct;
const C: Vec<Box<i32>> = Vec::new();
static S: Vec<Box<i32>> = Vec::new();
struct StructWithVecBox {
sized_type: Vec<Box<SizedStruct>>,
}
struct A(Vec<Box<SizedStruct>>);
struct B(Vec<Vec<Box<(u32)>>>);
}
/// The following should not trigger the lint
mod should_not_trigger {
use super::{BigStruct, UnsizedStruct};
struct C(Vec<Box<UnsizedStruct>>);
struct D(Vec<Box<BigStruct>>);
struct StructWithVecBoxButItsUnsized {
unsized_type: Vec<Box<UnsizedStruct>>,
}
struct TraitVec<T:?Sized> {
// Regression test for #3720. This was causing an ICE.
inner: Vec<Box<T>>,
}
}
mod inner_mod {
mod inner {
pub struct S;
}
mod inner2 {
use super::inner::S;
pub fn | () -> Vec<Box<S>> {
vec![]
}
}
}
fn main() {}
| f | identifier_name |
vec_box_sized.rs | // run-rustfix
#![allow(dead_code)]
struct SizedStruct(i32);
struct UnsizedStruct([i32]);
struct BigStruct([i32; 10000]);
/// The following should trigger the lint
mod should_trigger {
use super::SizedStruct;
const C: Vec<Box<i32>> = Vec::new();
static S: Vec<Box<i32>> = Vec::new();
struct StructWithVecBox {
sized_type: Vec<Box<SizedStruct>>,
}
struct A(Vec<Box<SizedStruct>>);
struct B(Vec<Vec<Box<(u32)>>>);
}
/// The following should not trigger the lint
mod should_not_trigger {
use super::{BigStruct, UnsizedStruct};
struct C(Vec<Box<UnsizedStruct>>);
struct D(Vec<Box<BigStruct>>);
| unsized_type: Vec<Box<UnsizedStruct>>,
}
struct TraitVec<T:?Sized> {
// Regression test for #3720. This was causing an ICE.
inner: Vec<Box<T>>,
}
}
mod inner_mod {
mod inner {
pub struct S;
}
mod inner2 {
use super::inner::S;
pub fn f() -> Vec<Box<S>> {
vec![]
}
}
}
fn main() {} | struct StructWithVecBoxButItsUnsized { | random_line_split |
upgrade.rs | use header::{Header, HeaderFormat};
use std::fmt;
use std::str::FromStr;
use header::parsing::{from_comma_delimited, fmt_comma_delimited};
use unicase::UniCase;
use self::Protocol::{WebSocket, ProtocolExt}; | pub struct Upgrade(pub Vec<Protocol>);
deref!(Upgrade => Vec<Protocol>);
/// Protocol values that can appear in the Upgrade header.
#[derive(Clone, PartialEq, Debug)]
pub enum Protocol {
/// The websocket protocol.
WebSocket,
/// Some other less common protocol.
ProtocolExt(String),
}
impl FromStr for Protocol {
type Err = ();
fn from_str(s: &str) -> Result<Protocol, ()> {
if UniCase(s) == UniCase("websocket") {
Ok(WebSocket)
}
else {
Ok(ProtocolExt(s.to_string()))
}
}
}
impl fmt::Display for Protocol {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", match *self {
WebSocket => "websocket",
ProtocolExt(ref s) => s.as_ref()
})
}
}
impl Header for Upgrade {
fn header_name() -> &'static str {
"Upgrade"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Upgrade> {
from_comma_delimited(raw).map(|vec| Upgrade(vec))
}
}
impl HeaderFormat for Upgrade {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Upgrade(ref parts) = *self;
fmt_comma_delimited(fmt, &parts[..])
}
}
bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] }); |
/// The `Upgrade` header.
#[derive(Clone, PartialEq, Debug)] | random_line_split |
upgrade.rs | use header::{Header, HeaderFormat};
use std::fmt;
use std::str::FromStr;
use header::parsing::{from_comma_delimited, fmt_comma_delimited};
use unicase::UniCase;
use self::Protocol::{WebSocket, ProtocolExt};
/// The `Upgrade` header.
#[derive(Clone, PartialEq, Debug)]
pub struct Upgrade(pub Vec<Protocol>);
deref!(Upgrade => Vec<Protocol>);
/// Protocol values that can appear in the Upgrade header.
#[derive(Clone, PartialEq, Debug)]
pub enum Protocol {
/// The websocket protocol.
WebSocket,
/// Some other less common protocol.
ProtocolExt(String),
}
impl FromStr for Protocol {
type Err = ();
fn from_str(s: &str) -> Result<Protocol, ()> {
if UniCase(s) == UniCase("websocket") {
Ok(WebSocket)
}
else {
Ok(ProtocolExt(s.to_string()))
}
}
}
impl fmt::Display for Protocol {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", match *self {
WebSocket => "websocket",
ProtocolExt(ref s) => s.as_ref()
})
}
}
impl Header for Upgrade {
fn header_name() -> &'static str |
fn parse_header(raw: &[Vec<u8>]) -> Option<Upgrade> {
from_comma_delimited(raw).map(|vec| Upgrade(vec))
}
}
impl HeaderFormat for Upgrade {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Upgrade(ref parts) = *self;
fmt_comma_delimited(fmt, &parts[..])
}
}
bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] });
| {
"Upgrade"
} | identifier_body |
upgrade.rs | use header::{Header, HeaderFormat};
use std::fmt;
use std::str::FromStr;
use header::parsing::{from_comma_delimited, fmt_comma_delimited};
use unicase::UniCase;
use self::Protocol::{WebSocket, ProtocolExt};
/// The `Upgrade` header.
#[derive(Clone, PartialEq, Debug)]
pub struct Upgrade(pub Vec<Protocol>);
deref!(Upgrade => Vec<Protocol>);
/// Protocol values that can appear in the Upgrade header.
#[derive(Clone, PartialEq, Debug)]
pub enum Protocol {
/// The websocket protocol.
WebSocket,
/// Some other less common protocol.
ProtocolExt(String),
}
impl FromStr for Protocol {
type Err = ();
fn from_str(s: &str) -> Result<Protocol, ()> {
if UniCase(s) == UniCase("websocket") {
Ok(WebSocket)
}
else {
Ok(ProtocolExt(s.to_string()))
}
}
}
impl fmt::Display for Protocol {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", match *self {
WebSocket => "websocket",
ProtocolExt(ref s) => s.as_ref()
})
}
}
impl Header for Upgrade {
fn | () -> &'static str {
"Upgrade"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Upgrade> {
from_comma_delimited(raw).map(|vec| Upgrade(vec))
}
}
impl HeaderFormat for Upgrade {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Upgrade(ref parts) = *self;
fmt_comma_delimited(fmt, &parts[..])
}
}
bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] });
| header_name | identifier_name |
upgrade.rs | use header::{Header, HeaderFormat};
use std::fmt;
use std::str::FromStr;
use header::parsing::{from_comma_delimited, fmt_comma_delimited};
use unicase::UniCase;
use self::Protocol::{WebSocket, ProtocolExt};
/// The `Upgrade` header.
#[derive(Clone, PartialEq, Debug)]
pub struct Upgrade(pub Vec<Protocol>);
deref!(Upgrade => Vec<Protocol>);
/// Protocol values that can appear in the Upgrade header.
#[derive(Clone, PartialEq, Debug)]
pub enum Protocol {
/// The websocket protocol.
WebSocket,
/// Some other less common protocol.
ProtocolExt(String),
}
impl FromStr for Protocol {
type Err = ();
fn from_str(s: &str) -> Result<Protocol, ()> {
if UniCase(s) == UniCase("websocket") {
Ok(WebSocket)
}
else |
}
}
impl fmt::Display for Protocol {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", match *self {
WebSocket => "websocket",
ProtocolExt(ref s) => s.as_ref()
})
}
}
impl Header for Upgrade {
fn header_name() -> &'static str {
"Upgrade"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Upgrade> {
from_comma_delimited(raw).map(|vec| Upgrade(vec))
}
}
impl HeaderFormat for Upgrade {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Upgrade(ref parts) = *self;
fmt_comma_delimited(fmt, &parts[..])
}
}
bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] });
| {
Ok(ProtocolExt(s.to_string()))
} | conditional_block |
simd-intrinsic-float-math.rs | // run-pass
// ignore-emscripten
// ignore-android
// FIXME: this test fails on arm-android because the NDK version 14 is too old.
// It needs at least version 18. We disable it on all android build bots because
// there is no way in compile-test to disable it for an (arch,os) pair.
// Test that the simd floating-point math intrinsics produce correct results.
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
struct f32x4(pub f32, pub f32, pub f32, pub f32);
extern "platform-intrinsic" {
fn simd_fsqrt<T>(x: T) -> T;
fn simd_fabs<T>(x: T) -> T;
fn simd_fsin<T>(x: T) -> T;
fn simd_fcos<T>(x: T) -> T;
fn simd_fexp<T>(x: T) -> T;
fn simd_fexp2<T>(x: T) -> T;
fn simd_fma<T>(x: T, y: T, z: T) -> T;
fn simd_flog<T>(x: T) -> T;
fn simd_flog10<T>(x: T) -> T;
fn simd_flog2<T>(x: T) -> T;
fn simd_fpow<T>(x: T, y: T) -> T;
fn simd_fpowi<T>(x: T, y: i32) -> T;
// rounding functions
fn simd_ceil<T>(x: T) -> T;
fn simd_floor<T>(x: T) -> T;
fn simd_round<T>(x: T) -> T;
fn simd_trunc<T>(x: T) -> T;
}
macro_rules! assert_approx_eq_f32 {
($a:expr, $b:expr) => ({
let (a, b) = (&$a, &$b);
assert!((*a - *b).abs() < 1.0e-6,
"{} is not approximately equal to {}", *a, *b);
})
}
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => ({
let a = $a;
let b = $b;
assert_approx_eq_f32!(a.0, b.0);
assert_approx_eq_f32!(a.1, b.1);
assert_approx_eq_f32!(a.2, b.2);
assert_approx_eq_f32!(a.3, b.3);
})
}
fn main() {
let x = f32x4(1.0, 1.0, 1.0, 1.0);
let y = f32x4(-1.0, -1.0, -1.0, -1.0);
let z = f32x4(0.0, 0.0, 0.0, 0.0);
let h = f32x4(0.5, 0.5, 0.5, 0.5);
unsafe {
let r = simd_fabs(y);
assert_approx_eq!(x, r);
| assert_approx_eq!(x, r);
let r = simd_fexp(z);
assert_approx_eq!(x, r);
let r = simd_fexp2(z);
assert_approx_eq!(x, r);
let r = simd_fma(x, h, h);
assert_approx_eq!(x, r);
let r = simd_fsqrt(x);
assert_approx_eq!(x, r);
let r = simd_flog(x);
assert_approx_eq!(z, r);
let r = simd_flog2(x);
assert_approx_eq!(z, r);
let r = simd_flog10(x);
assert_approx_eq!(z, r);
let r = simd_fpow(h, x);
assert_approx_eq!(h, r);
let r = simd_fpowi(h, 1);
assert_approx_eq!(h, r);
let r = simd_fsin(z);
assert_approx_eq!(z, r);
// rounding functions
let r = simd_floor(h);
assert_eq!(z, r);
let r = simd_ceil(h);
assert_eq!(x, r);
let r = simd_round(h);
assert_eq!(x, r);
let r = simd_trunc(h);
assert_eq!(z, r);
}
} | let r = simd_fcos(z); | random_line_split |
simd-intrinsic-float-math.rs | // run-pass
// ignore-emscripten
// ignore-android
// FIXME: this test fails on arm-android because the NDK version 14 is too old.
// It needs at least version 18. We disable it on all android build bots because
// there is no way in compile-test to disable it for an (arch,os) pair.
// Test that the simd floating-point math intrinsics produce correct results.
#![feature(repr_simd, platform_intrinsics)]
#![allow(non_camel_case_types)]
#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
struct | (pub f32, pub f32, pub f32, pub f32);
extern "platform-intrinsic" {
fn simd_fsqrt<T>(x: T) -> T;
fn simd_fabs<T>(x: T) -> T;
fn simd_fsin<T>(x: T) -> T;
fn simd_fcos<T>(x: T) -> T;
fn simd_fexp<T>(x: T) -> T;
fn simd_fexp2<T>(x: T) -> T;
fn simd_fma<T>(x: T, y: T, z: T) -> T;
fn simd_flog<T>(x: T) -> T;
fn simd_flog10<T>(x: T) -> T;
fn simd_flog2<T>(x: T) -> T;
fn simd_fpow<T>(x: T, y: T) -> T;
fn simd_fpowi<T>(x: T, y: i32) -> T;
// rounding functions
fn simd_ceil<T>(x: T) -> T;
fn simd_floor<T>(x: T) -> T;
fn simd_round<T>(x: T) -> T;
fn simd_trunc<T>(x: T) -> T;
}
macro_rules! assert_approx_eq_f32 {
($a:expr, $b:expr) => ({
let (a, b) = (&$a, &$b);
assert!((*a - *b).abs() < 1.0e-6,
"{} is not approximately equal to {}", *a, *b);
})
}
macro_rules! assert_approx_eq {
($a:expr, $b:expr) => ({
let a = $a;
let b = $b;
assert_approx_eq_f32!(a.0, b.0);
assert_approx_eq_f32!(a.1, b.1);
assert_approx_eq_f32!(a.2, b.2);
assert_approx_eq_f32!(a.3, b.3);
})
}
fn main() {
let x = f32x4(1.0, 1.0, 1.0, 1.0);
let y = f32x4(-1.0, -1.0, -1.0, -1.0);
let z = f32x4(0.0, 0.0, 0.0, 0.0);
let h = f32x4(0.5, 0.5, 0.5, 0.5);
unsafe {
let r = simd_fabs(y);
assert_approx_eq!(x, r);
let r = simd_fcos(z);
assert_approx_eq!(x, r);
let r = simd_fexp(z);
assert_approx_eq!(x, r);
let r = simd_fexp2(z);
assert_approx_eq!(x, r);
let r = simd_fma(x, h, h);
assert_approx_eq!(x, r);
let r = simd_fsqrt(x);
assert_approx_eq!(x, r);
let r = simd_flog(x);
assert_approx_eq!(z, r);
let r = simd_flog2(x);
assert_approx_eq!(z, r);
let r = simd_flog10(x);
assert_approx_eq!(z, r);
let r = simd_fpow(h, x);
assert_approx_eq!(h, r);
let r = simd_fpowi(h, 1);
assert_approx_eq!(h, r);
let r = simd_fsin(z);
assert_approx_eq!(z, r);
// rounding functions
let r = simd_floor(h);
assert_eq!(z, r);
let r = simd_ceil(h);
assert_eq!(x, r);
let r = simd_round(h);
assert_eq!(x, r);
let r = simd_trunc(h);
assert_eq!(z, r);
}
}
| f32x4 | identifier_name |
image_cache_task.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use image::base::Image;
use ipc_channel::ipc::{self, IpcSender};
use url::Url;
use std::sync::Arc;
use util::mem::HeapSizeOf;
/// This is optionally passed to the image cache when requesting
/// and image, and returned to the specified event loop when the
/// image load completes. It is typically used to trigger a reflow
/// and/or repaint.
#[derive(Deserialize, Serialize)]
pub struct | {
sender: IpcSender<ImageResponse>,
}
impl ImageResponder {
pub fn new(sender: IpcSender<ImageResponse>) -> ImageResponder {
ImageResponder {
sender: sender,
}
}
pub fn respond(&self, response: ImageResponse) {
self.sender.send(response).unwrap()
}
}
/// The current state of an image in the cache.
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum ImageState {
Pending,
LoadError,
NotRequested,
}
/// The returned image.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageResponse {
/// The requested image was loaded.
Loaded(Arc<Image>),
/// The requested image failed to load, so a placeholder was loaded instead.
PlaceholderLoaded(Arc<Image>),
/// Neither the requested image nor the placeholder could be loaded.
None
}
/// Channel for sending commands to the image cache.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageCacheChan(pub IpcSender<ImageCacheResult>);
/// The result of an image cache command that is returned to the
/// caller.
#[derive(Deserialize, Serialize)]
pub struct ImageCacheResult {
pub responder: Option<ImageResponder>,
pub image_response: ImageResponse,
}
/// Commands that the image cache understands.
#[derive(Deserialize, Serialize)]
pub enum ImageCacheCommand {
/// Request an image asynchronously from the cache. Supply a channel
/// to receive the result, and optionally an image responder
/// that is passed to the result channel.
RequestImage(Url, ImageCacheChan, Option<ImageResponder>),
/// Synchronously check the state of an image in the cache.
/// TODO(gw): Profile this on some real world sites and see
/// if it's worth caching the results of this locally in each
/// layout / paint task.
GetImageIfAvailable(Url, UsePlaceholder, IpcSender<Result<Arc<Image>, ImageState>>),
/// Clients must wait for a response before shutting down the ResourceTask
Exit(IpcSender<()>),
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum UsePlaceholder {
No,
Yes,
}
/// The client side of the image cache task. This can be safely cloned
/// and passed to different tasks.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageCacheTask {
chan: IpcSender<ImageCacheCommand>,
}
/// The public API for the image cache task.
impl ImageCacheTask {
/// Construct a new image cache
pub fn new(chan: IpcSender<ImageCacheCommand>) -> ImageCacheTask {
ImageCacheTask {
chan: chan,
}
}
/// Asynchronously request and image. See ImageCacheCommand::RequestImage.
pub fn request_image(&self,
url: Url,
result_chan: ImageCacheChan,
responder: Option<ImageResponder>) {
let msg = ImageCacheCommand::RequestImage(url, result_chan, responder);
self.chan.send(msg).unwrap();
}
/// Get the current state of an image. See ImageCacheCommand::GetImageIfAvailable.
pub fn get_image_if_available(&self, url: Url, use_placeholder: UsePlaceholder)
-> Result<Arc<Image>, ImageState> {
let (sender, receiver) = ipc::channel().unwrap();
let msg = ImageCacheCommand::GetImageIfAvailable(url, use_placeholder, sender);
self.chan.send(msg).unwrap();
receiver.recv().unwrap()
}
/// Shutdown the image cache task.
pub fn exit(&self) {
let (response_chan, response_port) = ipc::channel().unwrap();
self.chan.send(ImageCacheCommand::Exit(response_chan)).unwrap();
response_port.recv().unwrap();
}
}
| ImageResponder | identifier_name |
image_cache_task.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use image::base::Image;
use ipc_channel::ipc::{self, IpcSender};
use url::Url;
use std::sync::Arc;
use util::mem::HeapSizeOf;
/// This is optionally passed to the image cache when requesting
/// and image, and returned to the specified event loop when the
/// image load completes. It is typically used to trigger a reflow
/// and/or repaint.
#[derive(Deserialize, Serialize)]
pub struct ImageResponder {
sender: IpcSender<ImageResponse>,
}
impl ImageResponder {
pub fn new(sender: IpcSender<ImageResponse>) -> ImageResponder {
ImageResponder {
sender: sender,
}
}
pub fn respond(&self, response: ImageResponse) {
self.sender.send(response).unwrap()
}
}
/// The current state of an image in the cache.
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum ImageState {
Pending,
LoadError,
NotRequested,
}
/// The returned image.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum ImageResponse {
/// The requested image was loaded.
Loaded(Arc<Image>),
/// The requested image failed to load, so a placeholder was loaded instead.
PlaceholderLoaded(Arc<Image>),
/// Neither the requested image nor the placeholder could be loaded.
None
}
/// Channel for sending commands to the image cache.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageCacheChan(pub IpcSender<ImageCacheResult>);
/// The result of an image cache command that is returned to the
/// caller.
#[derive(Deserialize, Serialize)]
pub struct ImageCacheResult {
pub responder: Option<ImageResponder>,
pub image_response: ImageResponse,
}
/// Commands that the image cache understands.
#[derive(Deserialize, Serialize)]
pub enum ImageCacheCommand {
/// Request an image asynchronously from the cache. Supply a channel
/// to receive the result, and optionally an image responder
/// that is passed to the result channel.
RequestImage(Url, ImageCacheChan, Option<ImageResponder>),
/// Synchronously check the state of an image in the cache.
/// TODO(gw): Profile this on some real world sites and see
/// if it's worth caching the results of this locally in each
/// layout / paint task.
GetImageIfAvailable(Url, UsePlaceholder, IpcSender<Result<Arc<Image>, ImageState>>),
/// Clients must wait for a response before shutting down the ResourceTask
Exit(IpcSender<()>),
}
#[derive(Copy, Clone, PartialEq, Deserialize, Serialize)]
pub enum UsePlaceholder {
No,
Yes,
}
/// The client side of the image cache task. This can be safely cloned
/// and passed to different tasks.
#[derive(Clone, Deserialize, Serialize)]
pub struct ImageCacheTask {
chan: IpcSender<ImageCacheCommand>,
}
/// The public API for the image cache task.
impl ImageCacheTask {
/// Construct a new image cache
pub fn new(chan: IpcSender<ImageCacheCommand>) -> ImageCacheTask {
ImageCacheTask {
chan: chan,
}
}
/// Asynchronously request and image. See ImageCacheCommand::RequestImage.
pub fn request_image(&self,
url: Url,
result_chan: ImageCacheChan,
responder: Option<ImageResponder>) {
let msg = ImageCacheCommand::RequestImage(url, result_chan, responder);
self.chan.send(msg).unwrap();
}
/// Get the current state of an image. See ImageCacheCommand::GetImageIfAvailable.
pub fn get_image_if_available(&self, url: Url, use_placeholder: UsePlaceholder)
-> Result<Arc<Image>, ImageState> {
let (sender, receiver) = ipc::channel().unwrap();
let msg = ImageCacheCommand::GetImageIfAvailable(url, use_placeholder, sender);
self.chan.send(msg).unwrap();
receiver.recv().unwrap()
}
/// Shutdown the image cache task.
pub fn exit(&self) {
let (response_chan, response_port) = ipc::channel().unwrap(); | self.chan.send(ImageCacheCommand::Exit(response_chan)).unwrap();
response_port.recv().unwrap();
}
} | random_line_split |
|
cgmath_augment.rs | use cgmath::{Point2, Vector2};
pub trait Cross<T, S> { | pub trait Dot<T, S> {
fn dot(&self, other: &T) -> S;
}
//stupid type rules won't let me add std::ops::Add for f32/f64
pub trait AddScalar<T, S> {
fn add_scalar(self, rhs: S) -> T;
}
impl<S> Cross<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Point2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Cross<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Vector2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Dot<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Point2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> Dot<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Vector2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> AddScalar<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn add_scalar(self, rhs: S) -> Point2<S> {
Self {
x: self.x + rhs,
y: self.y + rhs
}
}
} | fn cross(&self, other: &T) -> S;
}
| random_line_split |
cgmath_augment.rs | use cgmath::{Point2, Vector2};
pub trait Cross<T, S> {
fn cross(&self, other: &T) -> S;
}
pub trait Dot<T, S> {
fn dot(&self, other: &T) -> S;
}
//stupid type rules won't let me add std::ops::Add for f32/f64
pub trait AddScalar<T, S> {
fn add_scalar(self, rhs: S) -> T;
}
impl<S> Cross<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Point2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Cross<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Vector2<S>) -> S |
}
impl<S> Dot<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Point2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> Dot<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Vector2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> AddScalar<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn add_scalar(self, rhs: S) -> Point2<S> {
Self {
x: self.x + rhs,
y: self.y + rhs
}
}
}
| {
(self.x * other.y) - (self.y * other.x)
} | identifier_body |
cgmath_augment.rs | use cgmath::{Point2, Vector2};
pub trait Cross<T, S> {
fn cross(&self, other: &T) -> S;
}
pub trait Dot<T, S> {
fn dot(&self, other: &T) -> S;
}
//stupid type rules won't let me add std::ops::Add for f32/f64
pub trait AddScalar<T, S> {
fn add_scalar(self, rhs: S) -> T;
}
impl<S> Cross<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn | (&self, other: &Point2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Cross<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Vector2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Dot<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Point2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> Dot<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Vector2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> AddScalar<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn add_scalar(self, rhs: S) -> Point2<S> {
Self {
x: self.x + rhs,
y: self.y + rhs
}
}
}
| cross | identifier_name |
create.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
// | // This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use toml_query::insert::TomlValueInsertExt;
use chrono::naive::NaiveDateTime as NDT;
use constants::*;
use error::TimeTrackError as TTE;
use error::TimeTrackErrorKind as TTEK;
use error::ResultExt;
use iter::storeid::TagStoreIdIter;
use iter::setendtime::SetEndTimeIter;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
pub struct CreateTimeTrackIter<'a> {
inner: TagStoreIdIter,
store: &'a Store,
}
impl<'a> CreateTimeTrackIter<'a>
{
pub fn new(inner: TagStoreIdIter, store: &'a Store) -> CreateTimeTrackIter<'a> {
CreateTimeTrackIter {
inner: inner,
store: store,
}
}
pub fn set_end_time(self, datetime: NDT) -> SetEndTimeIter<'a> {
SetEndTimeIter::new(self, datetime)
}
}
impl<'a> Iterator for CreateTimeTrackIter<'a>
{
type Item = Result<FileLockEntry<'a>, TTE>;
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|res| {
res.and_then(|(id, starttime)| {
self.store
.create(id)
.chain_err(|| TTEK::StoreWriteError)
.and_then(|mut entry| {
let v = Value::String(starttime.format(DATE_TIME_FORMAT).to_string());
entry.get_header_mut()
.insert(DATE_TIME_START_HEADER_PATH, v)
.chain_err(|| TTEK::HeaderWriteError)
.map(|_| entry)
})
})
})
}
} | random_line_split |
|
create.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use toml_query::insert::TomlValueInsertExt;
use chrono::naive::NaiveDateTime as NDT;
use constants::*;
use error::TimeTrackError as TTE;
use error::TimeTrackErrorKind as TTEK;
use error::ResultExt;
use iter::storeid::TagStoreIdIter;
use iter::setendtime::SetEndTimeIter;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
pub struct CreateTimeTrackIter<'a> {
inner: TagStoreIdIter,
store: &'a Store,
}
impl<'a> CreateTimeTrackIter<'a>
{
pub fn | (inner: TagStoreIdIter, store: &'a Store) -> CreateTimeTrackIter<'a> {
CreateTimeTrackIter {
inner: inner,
store: store,
}
}
pub fn set_end_time(self, datetime: NDT) -> SetEndTimeIter<'a> {
SetEndTimeIter::new(self, datetime)
}
}
impl<'a> Iterator for CreateTimeTrackIter<'a>
{
type Item = Result<FileLockEntry<'a>, TTE>;
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|res| {
res.and_then(|(id, starttime)| {
self.store
.create(id)
.chain_err(|| TTEK::StoreWriteError)
.and_then(|mut entry| {
let v = Value::String(starttime.format(DATE_TIME_FORMAT).to_string());
entry.get_header_mut()
.insert(DATE_TIME_START_HEADER_PATH, v)
.chain_err(|| TTEK::HeaderWriteError)
.map(|_| entry)
})
})
})
}
}
| new | identifier_name |
day_05.rs | pub fn first() {
let filename = "day05-01.txt";
let mut lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
lines.sort();
let mut max = 0;
for line in lines {
let id = get_id(&line);
if id > max {
max = id;
}
}
println!("Day 05 - 1: {}", max);
}
fn get_id(code: &str) -> i32 {
let fb = &code[0..7];
let lr = &code[7..];
let mut idx;
let mut min: i32 = 0;
let mut max: i32 = 127;
let mut gap = max - min + 1;
for y in fb.chars() {
if y == 'F' {
max = max - gap / 2;
} else if y == 'B' |
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx = max * 8;
min = 0;
max = 7;
gap = max - min + 1;
for x in lr.chars() {
if x == 'L' {
max = max - gap / 2;
} else if x == 'R' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx += max;
println!("{}-{}", max, min);
idx
}
pub fn second() {
let filename = "day05-01.txt";
let lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
let mut ids: Vec<i32> = lines.iter().map(|x| get_id(&x)).collect();
ids.sort();
let first = ids.iter();
let mut second = ids.iter();
second.next();
let zipped = first.zip(second);
for (a, b) in zipped {
// println!("{} {}", a, b);
if b - a == 2 {
println!("Day 05 - 2: {}", b - 1);
break;
}
}
}
| {
min = min + gap / 2;
} | conditional_block |
day_05.rs | pub fn first() {
let filename = "day05-01.txt";
let mut lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
lines.sort();
let mut max = 0;
for line in lines {
let id = get_id(&line);
if id > max {
max = id;
}
}
println!("Day 05 - 1: {}", max);
}
fn get_id(code: &str) -> i32 {
let fb = &code[0..7]; | let mut min: i32 = 0;
let mut max: i32 = 127;
let mut gap = max - min + 1;
for y in fb.chars() {
if y == 'F' {
max = max - gap / 2;
} else if y == 'B' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx = max * 8;
min = 0;
max = 7;
gap = max - min + 1;
for x in lr.chars() {
if x == 'L' {
max = max - gap / 2;
} else if x == 'R' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx += max;
println!("{}-{}", max, min);
idx
}
pub fn second() {
let filename = "day05-01.txt";
let lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
let mut ids: Vec<i32> = lines.iter().map(|x| get_id(&x)).collect();
ids.sort();
let first = ids.iter();
let mut second = ids.iter();
second.next();
let zipped = first.zip(second);
for (a, b) in zipped {
// println!("{} {}", a, b);
if b - a == 2 {
println!("Day 05 - 2: {}", b - 1);
break;
}
}
} | let lr = &code[7..];
let mut idx;
| random_line_split |
day_05.rs | pub fn first() |
fn get_id(code: &str) -> i32 {
let fb = &code[0..7];
let lr = &code[7..];
let mut idx;
let mut min: i32 = 0;
let mut max: i32 = 127;
let mut gap = max - min + 1;
for y in fb.chars() {
if y == 'F' {
max = max - gap / 2;
} else if y == 'B' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx = max * 8;
min = 0;
max = 7;
gap = max - min + 1;
for x in lr.chars() {
if x == 'L' {
max = max - gap / 2;
} else if x == 'R' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx += max;
println!("{}-{}", max, min);
idx
}
pub fn second() {
let filename = "day05-01.txt";
let lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
let mut ids: Vec<i32> = lines.iter().map(|x| get_id(&x)).collect();
ids.sort();
let first = ids.iter();
let mut second = ids.iter();
second.next();
let zipped = first.zip(second);
for (a, b) in zipped {
// println!("{} {}", a, b);
if b - a == 2 {
println!("Day 05 - 2: {}", b - 1);
break;
}
}
}
| {
let filename = "day05-01.txt";
let mut lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
lines.sort();
let mut max = 0;
for line in lines {
let id = get_id(&line);
if id > max {
max = id;
}
}
println!("Day 05 - 1: {}", max);
} | identifier_body |
day_05.rs | pub fn | () {
let filename = "day05-01.txt";
let mut lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
lines.sort();
let mut max = 0;
for line in lines {
let id = get_id(&line);
if id > max {
max = id;
}
}
println!("Day 05 - 1: {}", max);
}
fn get_id(code: &str) -> i32 {
let fb = &code[0..7];
let lr = &code[7..];
let mut idx;
let mut min: i32 = 0;
let mut max: i32 = 127;
let mut gap = max - min + 1;
for y in fb.chars() {
if y == 'F' {
max = max - gap / 2;
} else if y == 'B' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx = max * 8;
min = 0;
max = 7;
gap = max - min + 1;
for x in lr.chars() {
if x == 'L' {
max = max - gap / 2;
} else if x == 'R' {
min = min + gap / 2;
}
gap = max - min + 1;
println!("{} {} {}", max, min, gap);
}
idx += max;
println!("{}-{}", max, min);
idx
}
pub fn second() {
let filename = "day05-01.txt";
let lines: Vec<String> = super::helpers::read_lines(filename)
.into_iter()
.map(|x| x.unwrap())
.collect();
let mut ids: Vec<i32> = lines.iter().map(|x| get_id(&x)).collect();
ids.sort();
let first = ids.iter();
let mut second = ids.iter();
second.next();
let zipped = first.zip(second);
for (a, b) in zipped {
// println!("{} {}", a, b);
if b - a == 2 {
println!("Day 05 - 2: {}", b - 1);
break;
}
}
}
| first | identifier_name |
issue-19479.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
// pretty-expanded FIXME #23616
trait Base {
fn dummy(&self) { }
}
trait AssocA {
type X: Base;
fn dummy(&self) { }
}
trait AssocB {
type Y: Base;
fn dummy(&self) { }
}
impl<T: AssocA> AssocB for T {
type Y = <T as AssocA>::X;
} |
fn main() {} | random_line_split |
|
issue-19479.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
// pretty-expanded FIXME #23616
trait Base {
fn | (&self) { }
}
trait AssocA {
type X: Base;
fn dummy(&self) { }
}
trait AssocB {
type Y: Base;
fn dummy(&self) { }
}
impl<T: AssocA> AssocB for T {
type Y = <T as AssocA>::X;
}
fn main() {}
| dummy | identifier_name |
issue-19479.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
// pretty-expanded FIXME #23616
trait Base {
fn dummy(&self) { }
}
trait AssocA {
type X: Base;
fn dummy(&self) |
}
trait AssocB {
type Y: Base;
fn dummy(&self) { }
}
impl<T: AssocA> AssocB for T {
type Y = <T as AssocA>::X;
}
fn main() {}
| { } | identifier_body |
exchange.rs | //! The exchange pattern distributes pushed data between many target pushees.
use {Push, Data};
use dataflow::channels::Content;
use abomonation::Abomonation;
// TODO : Software write combining
/// Distributes records among target pushees according to a distribution function.
pub struct Exchange<T, D, P: Push<(T, Content<D>)>, H: Fn(&D) -> u64> {
pushers: Vec<P>,
buffers: Vec<Vec<D>>,
current: Option<T>,
hash_func: H,
}
impl<T: Clone, D, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Exchange<T, D, P, H> {
/// Allocates a new `Exchange` from a supplied set of pushers and a distribution function.
pub fn new(pushers: Vec<P>, key: H) -> Exchange<T, D, P, H> {
let mut buffers = vec![];
for _ in 0..pushers.len() {
buffers.push(Vec::with_capacity(Content::<D>::default_length()));
}
Exchange {
pushers: pushers,
hash_func: key,
buffers: buffers,
current: None,
}
}
#[inline]
fn flush(&mut self, index: usize) {
if!self.buffers[index].is_empty() {
if let Some(ref time) = self.current {
Content::push_at(&mut self.buffers[index], time.clone(), &mut self.pushers[index]);
}
}
}
}
impl<T: Eq+Clone+'static, D: Data+Abomonation, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Push<(T, Content<D>)> for Exchange<T, D, P, H> {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) | let index = (((self.hash_func)(&datum)) & mask) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
// unsafe {
// self.buffers.get_unchecked_mut(index).push(datum);
// if self.buffers.get_unchecked(index).len() == self.buffers.get_unchecked(index).capacity() {
// self.flush(index);
// }
// }
}
}
// as a last resort, use mod (%)
else {
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) % self.pushers.len() as u64) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
}
}
}
else {
// flush
for index in 0..self.pushers.len() {
self.flush(index);
self.pushers[index].push(&mut None);
}
}
}
}
}
| {
// if only one pusher, no exchange
if self.pushers.len() == 1 {
self.pushers[0].push(message);
}
else {
if let Some((ref time, ref mut data)) = *message {
// if the time isn't right, flush everything.
if self.current.as_ref().map_or(false, |x| x != time) {
for index in 0..self.pushers.len() {
self.flush(index);
}
}
self.current = Some(time.clone());
// if the number of pushers is a power of two, use a mask
if (self.pushers.len() & (self.pushers.len() - 1)) == 0 {
let mask = (self.pushers.len() - 1) as u64;
for datum in data.drain(..) { | identifier_body |
exchange.rs | //! The exchange pattern distributes pushed data between many target pushees.
use {Push, Data};
use dataflow::channels::Content;
use abomonation::Abomonation;
// TODO : Software write combining
/// Distributes records among target pushees according to a distribution function.
pub struct Exchange<T, D, P: Push<(T, Content<D>)>, H: Fn(&D) -> u64> {
pushers: Vec<P>,
buffers: Vec<Vec<D>>,
current: Option<T>,
hash_func: H,
}
impl<T: Clone, D, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Exchange<T, D, P, H> {
/// Allocates a new `Exchange` from a supplied set of pushers and a distribution function.
pub fn new(pushers: Vec<P>, key: H) -> Exchange<T, D, P, H> {
let mut buffers = vec![];
for _ in 0..pushers.len() {
buffers.push(Vec::with_capacity(Content::<D>::default_length()));
}
Exchange {
pushers: pushers,
hash_func: key,
buffers: buffers,
current: None,
}
}
#[inline]
fn flush(&mut self, index: usize) {
if!self.buffers[index].is_empty() {
if let Some(ref time) = self.current {
Content::push_at(&mut self.buffers[index], time.clone(), &mut self.pushers[index]);
}
}
}
}
impl<T: Eq+Clone+'static, D: Data+Abomonation, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Push<(T, Content<D>)> for Exchange<T, D, P, H> {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
// if only one pusher, no exchange
if self.pushers.len() == 1 {
self.pushers[0].push(message);
}
else {
if let Some((ref time, ref mut data)) = *message {
// if the time isn't right, flush everything.
if self.current.as_ref().map_or(false, |x| x!= time) {
for index in 0..self.pushers.len() {
self.flush(index);
}
}
self.current = Some(time.clone());
// if the number of pushers is a power of two, use a mask
if (self.pushers.len() & (self.pushers.len() - 1)) == 0 {
let mask = (self.pushers.len() - 1) as u64;
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) & mask) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
// unsafe {
// self.buffers.get_unchecked_mut(index).push(datum);
// if self.buffers.get_unchecked(index).len() == self.buffers.get_unchecked(index).capacity() {
// self.flush(index);
// }
// }
}
}
// as a last resort, use mod (%)
else {
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) % self.pushers.len() as u64) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
}
}
}
else {
// flush
for index in 0..self.pushers.len() {
self.flush(index);
self.pushers[index].push(&mut None);
}
} | }
} | } | random_line_split |
exchange.rs | //! The exchange pattern distributes pushed data between many target pushees.
use {Push, Data};
use dataflow::channels::Content;
use abomonation::Abomonation;
// TODO : Software write combining
/// Distributes records among target pushees according to a distribution function.
pub struct Exchange<T, D, P: Push<(T, Content<D>)>, H: Fn(&D) -> u64> {
pushers: Vec<P>,
buffers: Vec<Vec<D>>,
current: Option<T>,
hash_func: H,
}
impl<T: Clone, D, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Exchange<T, D, P, H> {
/// Allocates a new `Exchange` from a supplied set of pushers and a distribution function.
pub fn new(pushers: Vec<P>, key: H) -> Exchange<T, D, P, H> {
let mut buffers = vec![];
for _ in 0..pushers.len() {
buffers.push(Vec::with_capacity(Content::<D>::default_length()));
}
Exchange {
pushers: pushers,
hash_func: key,
buffers: buffers,
current: None,
}
}
#[inline]
fn flush(&mut self, index: usize) {
if!self.buffers[index].is_empty() {
if let Some(ref time) = self.current {
Content::push_at(&mut self.buffers[index], time.clone(), &mut self.pushers[index]);
}
}
}
}
impl<T: Eq+Clone+'static, D: Data+Abomonation, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Push<(T, Content<D>)> for Exchange<T, D, P, H> {
#[inline]
fn | (&mut self, message: &mut Option<(T, Content<D>)>) {
// if only one pusher, no exchange
if self.pushers.len() == 1 {
self.pushers[0].push(message);
}
else {
if let Some((ref time, ref mut data)) = *message {
// if the time isn't right, flush everything.
if self.current.as_ref().map_or(false, |x| x!= time) {
for index in 0..self.pushers.len() {
self.flush(index);
}
}
self.current = Some(time.clone());
// if the number of pushers is a power of two, use a mask
if (self.pushers.len() & (self.pushers.len() - 1)) == 0 {
let mask = (self.pushers.len() - 1) as u64;
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) & mask) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
// unsafe {
// self.buffers.get_unchecked_mut(index).push(datum);
// if self.buffers.get_unchecked(index).len() == self.buffers.get_unchecked(index).capacity() {
// self.flush(index);
// }
// }
}
}
// as a last resort, use mod (%)
else {
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) % self.pushers.len() as u64) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
}
}
}
else {
// flush
for index in 0..self.pushers.len() {
self.flush(index);
self.pushers[index].push(&mut None);
}
}
}
}
}
| push | identifier_name |
exchange.rs | //! The exchange pattern distributes pushed data between many target pushees.
use {Push, Data};
use dataflow::channels::Content;
use abomonation::Abomonation;
// TODO : Software write combining
/// Distributes records among target pushees according to a distribution function.
pub struct Exchange<T, D, P: Push<(T, Content<D>)>, H: Fn(&D) -> u64> {
pushers: Vec<P>,
buffers: Vec<Vec<D>>,
current: Option<T>,
hash_func: H,
}
impl<T: Clone, D, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Exchange<T, D, P, H> {
/// Allocates a new `Exchange` from a supplied set of pushers and a distribution function.
pub fn new(pushers: Vec<P>, key: H) -> Exchange<T, D, P, H> {
let mut buffers = vec![];
for _ in 0..pushers.len() {
buffers.push(Vec::with_capacity(Content::<D>::default_length()));
}
Exchange {
pushers: pushers,
hash_func: key,
buffers: buffers,
current: None,
}
}
#[inline]
fn flush(&mut self, index: usize) {
if!self.buffers[index].is_empty() {
if let Some(ref time) = self.current {
Content::push_at(&mut self.buffers[index], time.clone(), &mut self.pushers[index]);
}
}
}
}
impl<T: Eq+Clone+'static, D: Data+Abomonation, P: Push<(T, Content<D>)>, H: Fn(&D)->u64> Push<(T, Content<D>)> for Exchange<T, D, P, H> {
#[inline]
fn push(&mut self, message: &mut Option<(T, Content<D>)>) {
// if only one pusher, no exchange
if self.pushers.len() == 1 {
self.pushers[0].push(message);
}
else {
if let Some((ref time, ref mut data)) = *message {
// if the time isn't right, flush everything.
if self.current.as_ref().map_or(false, |x| x!= time) {
for index in 0..self.pushers.len() {
self.flush(index);
}
}
self.current = Some(time.clone());
// if the number of pushers is a power of two, use a mask
if (self.pushers.len() & (self.pushers.len() - 1)) == 0 |
// as a last resort, use mod (%)
else {
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) % self.pushers.len() as u64) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
}
}
}
else {
// flush
for index in 0..self.pushers.len() {
self.flush(index);
self.pushers[index].push(&mut None);
}
}
}
}
}
| {
let mask = (self.pushers.len() - 1) as u64;
for datum in data.drain(..) {
let index = (((self.hash_func)(&datum)) & mask) as usize;
self.buffers[index].push(datum);
if self.buffers[index].len() == self.buffers[index].capacity() {
self.flush(index);
}
// unsafe {
// self.buffers.get_unchecked_mut(index).push(datum);
// if self.buffers.get_unchecked(index).len() == self.buffers.get_unchecked(index).capacity() {
// self.flush(index);
// }
// }
}
} | conditional_block |
serialise.rs | /* Notice: Copyright 2016, The Care Connections Initiative c.i.c.
* Author: Charlie Fyvie-Gauld ([email protected])
* License: GPLv3 (http://www.gnu.org/licenses/gpl-3.0.txt)
*/
use std::mem::transmute;
use ::enums::Failure;
pub trait NetSerial : Sized {
fn serialise(&self) -> Vec<u8>;
fn deserialise(bytes: &[u8]) -> Result<Self, Failure>;
fn lower_bound() -> usize;
}
pub fn push_bytes(v: &mut Vec<u8>, bytes: &[u8]) {
for b in bytes { | }
}
pub fn u32_transmute_be_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[3], a[2], a[1], a[0]]) }
}
pub fn u32_transmute_le_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[0], a[1], a[2], a[3]]) }
}
pub fn array_transmute_be_u32(d: u32) -> [u8;4] {
unsafe { transmute(d.to_be()) }
}
pub fn array_transmute_le_u32(d: u32) -> [u8;4] {
unsafe { transmute(d.to_le()) }
}
pub fn byte_slice_4array(a: &[u8]) -> [u8;4] {
[ a[0], a[1], a[2], a[3] ]
}
pub fn deserialise_bool(byte: u8) -> bool {
match byte {
0 => false,
_ => true
}
}
pub fn hex_str_to_byte(src: &[u8]) -> Option<u8> {
let mut val = 0;
let mut factor = 16;
for i in 0.. 2 {
val += match src[i] {
v @ 48... 57 => (v - 48) * factor,
v @ 65... 70 => (v - 55) * factor,
v @ 97... 102 => (v - 87) * factor,
_ => return None,
};
factor >>= 4;
}
Some(val)
}
pub fn bin_to_hex(src: &Vec<u8>) -> String {
let mut s = String::new();
for byte in src {
s.push_str( format!("{:0>2x}", byte).as_ref() )
}
s
} | v.push(*b) | random_line_split |
serialise.rs | /* Notice: Copyright 2016, The Care Connections Initiative c.i.c.
* Author: Charlie Fyvie-Gauld ([email protected])
* License: GPLv3 (http://www.gnu.org/licenses/gpl-3.0.txt)
*/
use std::mem::transmute;
use ::enums::Failure;
pub trait NetSerial : Sized {
fn serialise(&self) -> Vec<u8>;
fn deserialise(bytes: &[u8]) -> Result<Self, Failure>;
fn lower_bound() -> usize;
}
pub fn push_bytes(v: &mut Vec<u8>, bytes: &[u8]) {
for b in bytes {
v.push(*b)
}
}
pub fn u32_transmute_be_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[3], a[2], a[1], a[0]]) }
}
pub fn u32_transmute_le_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[0], a[1], a[2], a[3]]) }
}
pub fn array_transmute_be_u32(d: u32) -> [u8;4] |
pub fn array_transmute_le_u32(d: u32) -> [u8;4] {
unsafe { transmute(d.to_le()) }
}
pub fn byte_slice_4array(a: &[u8]) -> [u8;4] {
[ a[0], a[1], a[2], a[3] ]
}
pub fn deserialise_bool(byte: u8) -> bool {
match byte {
0 => false,
_ => true
}
}
pub fn hex_str_to_byte(src: &[u8]) -> Option<u8> {
let mut val = 0;
let mut factor = 16;
for i in 0.. 2 {
val += match src[i] {
v @ 48... 57 => (v - 48) * factor,
v @ 65... 70 => (v - 55) * factor,
v @ 97... 102 => (v - 87) * factor,
_ => return None,
};
factor >>= 4;
}
Some(val)
}
pub fn bin_to_hex(src: &Vec<u8>) -> String {
let mut s = String::new();
for byte in src {
s.push_str( format!("{:0>2x}", byte).as_ref() )
}
s
} | {
unsafe { transmute(d.to_be()) }
} | identifier_body |
serialise.rs | /* Notice: Copyright 2016, The Care Connections Initiative c.i.c.
* Author: Charlie Fyvie-Gauld ([email protected])
* License: GPLv3 (http://www.gnu.org/licenses/gpl-3.0.txt)
*/
use std::mem::transmute;
use ::enums::Failure;
pub trait NetSerial : Sized {
fn serialise(&self) -> Vec<u8>;
fn deserialise(bytes: &[u8]) -> Result<Self, Failure>;
fn lower_bound() -> usize;
}
pub fn push_bytes(v: &mut Vec<u8>, bytes: &[u8]) {
for b in bytes {
v.push(*b)
}
}
pub fn u32_transmute_be_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[3], a[2], a[1], a[0]]) }
}
pub fn u32_transmute_le_arr(a: &[u8]) -> u32 {
unsafe { transmute::<[u8;4], u32>([a[0], a[1], a[2], a[3]]) }
}
pub fn array_transmute_be_u32(d: u32) -> [u8;4] {
unsafe { transmute(d.to_be()) }
}
pub fn array_transmute_le_u32(d: u32) -> [u8;4] {
unsafe { transmute(d.to_le()) }
}
pub fn byte_slice_4array(a: &[u8]) -> [u8;4] {
[ a[0], a[1], a[2], a[3] ]
}
pub fn | (byte: u8) -> bool {
match byte {
0 => false,
_ => true
}
}
pub fn hex_str_to_byte(src: &[u8]) -> Option<u8> {
let mut val = 0;
let mut factor = 16;
for i in 0.. 2 {
val += match src[i] {
v @ 48... 57 => (v - 48) * factor,
v @ 65... 70 => (v - 55) * factor,
v @ 97... 102 => (v - 87) * factor,
_ => return None,
};
factor >>= 4;
}
Some(val)
}
pub fn bin_to_hex(src: &Vec<u8>) -> String {
let mut s = String::new();
for byte in src {
s.push_str( format!("{:0>2x}", byte).as_ref() )
}
s
} | deserialise_bool | identifier_name |
structsource0.rs | // On créé un type nommé `Borrowed` qui a pour attribut
// une référence d'un entier codé sur 32 bits. La référence
// doit survivre à l'instance de la structure `Borrowed`.
#[derive(Debug)]
struct Borrowed<'a>(&'a i32);
// Même combat, ces deux références doivent survivre à l'instance
// (ou aux instances) de la structure `NamedBorrowed`.
#[derive(Debug)]
struct NamedBorrowed<'a> {
x: &'a i32,
y: &'a i32,
}
// On créé une énumération qui contient deux variantes:
// 1. Un tuple qui prend en entrée un entier codé sur 32 bits;
// 2. Un tuple qui prend en entrée une référence d'un `i32`.
#[derive(Debug)]
enum Either<'a> {
Num(i | Ref(&'a i32),
}
fn main() {
let x = 18;
let y = 15;
let single = Borrowed(&x);
let double = NamedBorrowed { x: &x, y: &y };
let reference = Either::Ref(&x);
let number = Either::Num(y);
println!("x is borrowed in {:?}", single);
println!("x and y are borrowed in {:?}", double);
println!("x is borrowed in {:?}", reference);
println!("y is *not* borrowed in {:?}", number);
}
| 32),
| identifier_name |
structsource0.rs | // On créé un type nommé `Borrowed` qui a pour attribut
// une référence d'un entier codé sur 32 bits. La référence
// doit survivre à l'instance de la structure `Borrowed`.
#[derive(Debug)]
struct Borrowed<'a>(&'a i32);
// Même combat, ces deux références doivent survivre à l'instance
// (ou aux instances) de la structure `NamedBorrowed`.
#[derive(Debug)]
struct NamedBorrowed<'a> {
x: &'a i32,
y: &'a i32,
}
// On créé une énumération qui contient deux variantes:
// 1. Un tuple qui prend en entrée un entier codé sur 32 bits;
// 2. Un tuple qui prend en entrée une référence d'un `i32`.
#[derive(Debug)]
enum Either<'a> { | Ref(&'a i32),
}
fn main() {
let x = 18;
let y = 15;
let single = Borrowed(&x);
let double = NamedBorrowed { x: &x, y: &y };
let reference = Either::Ref(&x);
let number = Either::Num(y);
println!("x is borrowed in {:?}", single);
println!("x and y are borrowed in {:?}", double);
println!("x is borrowed in {:?}", reference);
println!("y is *not* borrowed in {:?}", number);
} | Num(i32), | random_line_split |
lib.rs | pub use platform::*;
pub type Handle = *const std::os::raw::c_void;
pub type Error = Box<dyn std::error::Error>;
pub const CURRENT_PROCESS: Handle = 0 as Handle;
#[cfg(unix)]
mod platform {
use super::{Error, Handle, CURRENT_PROCESS};
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int, c_void};
use std::path::Path;
pub const DYLIB_SUBDIR: &str = "lib";
pub const DYLIB_PREFIX: &str = "lib";
#[cfg(target_os = "linux")]
pub const DYLIB_EXTENSION: &str = "so";
#[cfg(target_os = "macos")]
pub const DYLIB_EXTENSION: &str = "dylib";
#[cfg(target_os = "linux")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x100;
pub const RTLD_DEFAULT: Handle = 0 as Handle;
}
#[cfg(target_os = "macos")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x8;
pub const RTLD_DEFAULT: Handle = -2i32 as Handle;
}
pub use constants::*;
#[link(name = "dl")]
extern "C" {
fn dlopen(filename: *const c_char, flag: c_int) -> Handle;
fn dlclose(handle: Handle) -> c_int;
fn dlsym(handle: Handle, symbol: *const c_char) -> *const c_void;
fn dlerror() -> *const c_char;
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
let cpath = CString::new(path.as_os_str().to_str().unwrap().as_bytes()).unwrap();
let flags = match global_symbols {
true => RTLD_LAZY | RTLD_GLOBAL,
false => RTLD_LAZY,
};
let handle = dlopen(cpath.as_ptr() as *const c_char, flags); | if handle.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if dlclose(handle) == 0 {
Ok(())
} else {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let handle = match handle {
CURRENT_PROCESS => RTLD_DEFAULT,
_ => handle,
};
let ptr = dlsym(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(ptr)
}
}
}
#[cfg(windows)]
mod platform {
use super::{Error, Handle};
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::os::raw::{c_char, c_void};
use std::os::windows::ffi::*;
use std::path::Path;
pub const DYLIB_PREFIX: &str = "";
pub const DYLIB_EXTENSION: &str = "dll";
pub const DYLIB_SUBDIR: &str = "bin";
#[link(name = "kernel32")]
extern "system" {
fn LoadLibraryW(filename: *const u16) -> Handle;
fn FreeLibrary(handle: Handle) -> u32;
fn GetProcAddress(handle: Handle, symbol: *const c_char) -> *const c_void;
fn GetLastError() -> u32;
}
fn to_wstr(s: &OsStr) -> Vec<u16> {
s.encode_wide().chain(Some(0)).collect::<Vec<_>>()
}
pub fn add_library_directory(path: &Path) -> Result<(), Error> {
if!path.is_dir() {
return Err("Not a directory".into());
}
let mut os_path = OsString::from(path);
if let Some(val) = env::var_os("PATH") {
os_path.push(";");
os_path.push(val);
}
env::set_var("PATH", &os_path);
Ok(())
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
if global_symbols {
if let Some(dir) = path.parent() {
add_library_directory(dir)?;
}
}
let handle = LoadLibraryW(to_wstr(path.as_os_str()).as_ptr());
if handle.is_null() {
Err(format!("Could not load {:?} (err={:08X})", path, GetLastError()).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if FreeLibrary(handle)!= 0 {
Ok(())
} else {
Err(format!("Could not free library (err={:08X})", GetLastError()).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let ptr = GetProcAddress(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("Could not find {} (err={:08X})", name, GetLastError()).into())
} else {
Ok(ptr)
}
}
} | random_line_split |
|
lib.rs | pub use platform::*;
pub type Handle = *const std::os::raw::c_void;
pub type Error = Box<dyn std::error::Error>;
pub const CURRENT_PROCESS: Handle = 0 as Handle;
#[cfg(unix)]
mod platform {
use super::{Error, Handle, CURRENT_PROCESS};
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int, c_void};
use std::path::Path;
pub const DYLIB_SUBDIR: &str = "lib";
pub const DYLIB_PREFIX: &str = "lib";
#[cfg(target_os = "linux")]
pub const DYLIB_EXTENSION: &str = "so";
#[cfg(target_os = "macos")]
pub const DYLIB_EXTENSION: &str = "dylib";
#[cfg(target_os = "linux")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x100;
pub const RTLD_DEFAULT: Handle = 0 as Handle;
}
#[cfg(target_os = "macos")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x8;
pub const RTLD_DEFAULT: Handle = -2i32 as Handle;
}
pub use constants::*;
#[link(name = "dl")]
extern "C" {
fn dlopen(filename: *const c_char, flag: c_int) -> Handle;
fn dlclose(handle: Handle) -> c_int;
fn dlsym(handle: Handle, symbol: *const c_char) -> *const c_void;
fn dlerror() -> *const c_char;
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
let cpath = CString::new(path.as_os_str().to_str().unwrap().as_bytes()).unwrap();
let flags = match global_symbols {
true => RTLD_LAZY | RTLD_GLOBAL,
false => RTLD_LAZY,
};
let handle = dlopen(cpath.as_ptr() as *const c_char, flags);
if handle.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if dlclose(handle) == 0 {
Ok(())
} else {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let handle = match handle {
CURRENT_PROCESS => RTLD_DEFAULT,
_ => handle,
};
let ptr = dlsym(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(ptr)
}
}
}
#[cfg(windows)]
mod platform {
use super::{Error, Handle};
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::os::raw::{c_char, c_void};
use std::os::windows::ffi::*;
use std::path::Path;
pub const DYLIB_PREFIX: &str = "";
pub const DYLIB_EXTENSION: &str = "dll";
pub const DYLIB_SUBDIR: &str = "bin";
#[link(name = "kernel32")]
extern "system" {
fn LoadLibraryW(filename: *const u16) -> Handle;
fn FreeLibrary(handle: Handle) -> u32;
fn GetProcAddress(handle: Handle, symbol: *const c_char) -> *const c_void;
fn GetLastError() -> u32;
}
fn to_wstr(s: &OsStr) -> Vec<u16> {
s.encode_wide().chain(Some(0)).collect::<Vec<_>>()
}
pub fn add_library_directory(path: &Path) -> Result<(), Error> {
if!path.is_dir() {
return Err("Not a directory".into());
}
let mut os_path = OsString::from(path);
if let Some(val) = env::var_os("PATH") {
os_path.push(";");
os_path.push(val);
}
env::set_var("PATH", &os_path);
Ok(())
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
if global_symbols {
if let Some(dir) = path.parent() {
add_library_directory(dir)?;
}
}
let handle = LoadLibraryW(to_wstr(path.as_os_str()).as_ptr());
if handle.is_null() {
Err(format!("Could not load {:?} (err={:08X})", path, GetLastError()).into())
} else |
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if FreeLibrary(handle)!= 0 {
Ok(())
} else {
Err(format!("Could not free library (err={:08X})", GetLastError()).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let ptr = GetProcAddress(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("Could not find {} (err={:08X})", name, GetLastError()).into())
} else {
Ok(ptr)
}
}
}
| {
Ok(handle)
} | conditional_block |
lib.rs | pub use platform::*;
pub type Handle = *const std::os::raw::c_void;
pub type Error = Box<dyn std::error::Error>;
pub const CURRENT_PROCESS: Handle = 0 as Handle;
#[cfg(unix)]
mod platform {
use super::{Error, Handle, CURRENT_PROCESS};
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int, c_void};
use std::path::Path;
pub const DYLIB_SUBDIR: &str = "lib";
pub const DYLIB_PREFIX: &str = "lib";
#[cfg(target_os = "linux")]
pub const DYLIB_EXTENSION: &str = "so";
#[cfg(target_os = "macos")]
pub const DYLIB_EXTENSION: &str = "dylib";
#[cfg(target_os = "linux")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x100;
pub const RTLD_DEFAULT: Handle = 0 as Handle;
}
#[cfg(target_os = "macos")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x8;
pub const RTLD_DEFAULT: Handle = -2i32 as Handle;
}
pub use constants::*;
#[link(name = "dl")]
extern "C" {
fn dlopen(filename: *const c_char, flag: c_int) -> Handle;
fn dlclose(handle: Handle) -> c_int;
fn dlsym(handle: Handle, symbol: *const c_char) -> *const c_void;
fn dlerror() -> *const c_char;
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
let cpath = CString::new(path.as_os_str().to_str().unwrap().as_bytes()).unwrap();
let flags = match global_symbols {
true => RTLD_LAZY | RTLD_GLOBAL,
false => RTLD_LAZY,
};
let handle = dlopen(cpath.as_ptr() as *const c_char, flags);
if handle.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if dlclose(handle) == 0 {
Ok(())
} else {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let handle = match handle {
CURRENT_PROCESS => RTLD_DEFAULT,
_ => handle,
};
let ptr = dlsym(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(ptr)
}
}
}
#[cfg(windows)]
mod platform {
use super::{Error, Handle};
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::os::raw::{c_char, c_void};
use std::os::windows::ffi::*;
use std::path::Path;
pub const DYLIB_PREFIX: &str = "";
pub const DYLIB_EXTENSION: &str = "dll";
pub const DYLIB_SUBDIR: &str = "bin";
#[link(name = "kernel32")]
extern "system" {
fn LoadLibraryW(filename: *const u16) -> Handle;
fn FreeLibrary(handle: Handle) -> u32;
fn GetProcAddress(handle: Handle, symbol: *const c_char) -> *const c_void;
fn GetLastError() -> u32;
}
fn to_wstr(s: &OsStr) -> Vec<u16> {
s.encode_wide().chain(Some(0)).collect::<Vec<_>>()
}
pub fn add_library_directory(path: &Path) -> Result<(), Error> {
if!path.is_dir() {
return Err("Not a directory".into());
}
let mut os_path = OsString::from(path);
if let Some(val) = env::var_os("PATH") {
os_path.push(";");
os_path.push(val);
}
env::set_var("PATH", &os_path);
Ok(())
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
if global_symbols {
if let Some(dir) = path.parent() {
add_library_directory(dir)?;
}
}
let handle = LoadLibraryW(to_wstr(path.as_os_str()).as_ptr());
if handle.is_null() {
Err(format!("Could not load {:?} (err={:08X})", path, GetLastError()).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> |
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let ptr = GetProcAddress(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("Could not find {} (err={:08X})", name, GetLastError()).into())
} else {
Ok(ptr)
}
}
}
| {
if FreeLibrary(handle) != 0 {
Ok(())
} else {
Err(format!("Could not free library (err={:08X})", GetLastError()).into())
}
} | identifier_body |
lib.rs | pub use platform::*;
pub type Handle = *const std::os::raw::c_void;
pub type Error = Box<dyn std::error::Error>;
pub const CURRENT_PROCESS: Handle = 0 as Handle;
#[cfg(unix)]
mod platform {
use super::{Error, Handle, CURRENT_PROCESS};
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int, c_void};
use std::path::Path;
pub const DYLIB_SUBDIR: &str = "lib";
pub const DYLIB_PREFIX: &str = "lib";
#[cfg(target_os = "linux")]
pub const DYLIB_EXTENSION: &str = "so";
#[cfg(target_os = "macos")]
pub const DYLIB_EXTENSION: &str = "dylib";
#[cfg(target_os = "linux")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x100;
pub const RTLD_DEFAULT: Handle = 0 as Handle;
}
#[cfg(target_os = "macos")]
mod constants {
use super::*;
pub const RTLD_LAZY: c_int = 0x1;
pub const RTLD_GLOBAL: c_int = 0x8;
pub const RTLD_DEFAULT: Handle = -2i32 as Handle;
}
pub use constants::*;
#[link(name = "dl")]
extern "C" {
fn dlopen(filename: *const c_char, flag: c_int) -> Handle;
fn dlclose(handle: Handle) -> c_int;
fn dlsym(handle: Handle, symbol: *const c_char) -> *const c_void;
fn dlerror() -> *const c_char;
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
let cpath = CString::new(path.as_os_str().to_str().unwrap().as_bytes()).unwrap();
let flags = match global_symbols {
true => RTLD_LAZY | RTLD_GLOBAL,
false => RTLD_LAZY,
};
let handle = dlopen(cpath.as_ptr() as *const c_char, flags);
if handle.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(handle)
}
}
pub unsafe fn free_library(handle: Handle) -> Result<(), Error> {
if dlclose(handle) == 0 {
Ok(())
} else {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let handle = match handle {
CURRENT_PROCESS => RTLD_DEFAULT,
_ => handle,
};
let ptr = dlsym(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("{:?}", CStr::from_ptr(dlerror())).into())
} else {
Ok(ptr)
}
}
}
#[cfg(windows)]
mod platform {
use super::{Error, Handle};
use std::env;
use std::ffi::{CString, OsStr, OsString};
use std::os::raw::{c_char, c_void};
use std::os::windows::ffi::*;
use std::path::Path;
pub const DYLIB_PREFIX: &str = "";
pub const DYLIB_EXTENSION: &str = "dll";
pub const DYLIB_SUBDIR: &str = "bin";
#[link(name = "kernel32")]
extern "system" {
fn LoadLibraryW(filename: *const u16) -> Handle;
fn FreeLibrary(handle: Handle) -> u32;
fn GetProcAddress(handle: Handle, symbol: *const c_char) -> *const c_void;
fn GetLastError() -> u32;
}
fn to_wstr(s: &OsStr) -> Vec<u16> {
s.encode_wide().chain(Some(0)).collect::<Vec<_>>()
}
pub fn add_library_directory(path: &Path) -> Result<(), Error> {
if!path.is_dir() {
return Err("Not a directory".into());
}
let mut os_path = OsString::from(path);
if let Some(val) = env::var_os("PATH") {
os_path.push(";");
os_path.push(val);
}
env::set_var("PATH", &os_path);
Ok(())
}
pub unsafe fn load_library(path: &Path, global_symbols: bool) -> Result<Handle, Error> {
if global_symbols {
if let Some(dir) = path.parent() {
add_library_directory(dir)?;
}
}
let handle = LoadLibraryW(to_wstr(path.as_os_str()).as_ptr());
if handle.is_null() {
Err(format!("Could not load {:?} (err={:08X})", path, GetLastError()).into())
} else {
Ok(handle)
}
}
pub unsafe fn | (handle: Handle) -> Result<(), Error> {
if FreeLibrary(handle)!= 0 {
Ok(())
} else {
Err(format!("Could not free library (err={:08X})", GetLastError()).into())
}
}
pub unsafe fn find_symbol(handle: Handle, name: &str) -> Result<*const c_void, Error> {
let cname = CString::new(name).unwrap();
let ptr = GetProcAddress(handle, cname.as_ptr() as *const c_char);
if ptr.is_null() {
Err(format!("Could not find {} (err={:08X})", name, GetLastError()).into())
} else {
Ok(ptr)
}
}
}
| free_library | identifier_name |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::mem;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if!next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = mem::transmute(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail | else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while!cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = mem::transmute(cur);
cur = next;
}
}
}
}
| {Empty} | conditional_block |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::mem;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if!next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = mem::transmute(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn | (&mut self) {
unsafe {
let mut cur = *self.tail.get();
while!cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = mem::transmute(cur);
cur = next;
}
}
}
}
| drop | identifier_name |
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::mem;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
}
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if!next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = mem::transmute(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while!cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = mem::transmute(cur);
cur = next;
}
}
}
} | random_line_split |
|
mpsc_queue.rs | /* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are
* those of the authors and should not be interpreted as representing official
* policies, either expressed or implied, of Dmitry Vyukov.
*/
//! A mostly lock-free multi-producer, single consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between threads, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat of the `pop`
//! method, and see the method for more information about it. Due to this
//! caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
pub use self::PopResult::*;
use std::mem;
use std::ptr;
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicPtr, Ordering};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
/// The queue is empty
Empty,
/// The queue is in an inconsistent state. Popping data should succeed, but
/// some pushers have yet to make enough progress in order allow a pop to
/// succeed. It is recommended that a pop() occur "in the near future" in
/// order to see if the sender has made progress or not
Inconsistent,
}
struct Node<T> {
next: AtomicPtr<Node<T>>,
value: Option<T>,
}
/// The multi-producer single-consumer structure. This is not cloneable, but it
/// may be safely shared so long as it is guaranteed that there is only one
/// popper at a time (many pushers are allowed).
pub struct Queue<T> {
head: AtomicPtr<Node<T>>,
tail: UnsafeCell<*mut Node<T>>,
}
unsafe impl<T: Send> Send for Queue<T> { }
unsafe impl<T: Send> Sync for Queue<T> { }
impl<T> Node<T> {
unsafe fn new(v: Option<T>) -> *mut Node<T> {
mem::transmute(Box::new(Node {
next: AtomicPtr::new(ptr::null_mut()),
value: v,
}))
}
}
impl<T> Queue<T> {
/// Creates a new queue that is safe to share among multiple producers and
/// one consumer.
pub fn new() -> Queue<T> |
/// Pushes a new value onto this queue.
pub fn push(&self, t: T) {
unsafe {
let n = Node::new(Some(t));
let prev = self.head.swap(n, Ordering::AcqRel);
(*prev).next.store(n, Ordering::Release);
}
}
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// it does not currently have access to it at this time.
pub fn pop(&self) -> PopResult<T> {
unsafe {
let tail = *self.tail.get();
let next = (*tail).next.load(Ordering::Acquire);
if!next.is_null() {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = mem::transmute(tail);
return Data(ret);
}
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
}
}
}
impl<T> Drop for Queue<T> {
fn drop(&mut self) {
unsafe {
let mut cur = *self.tail.get();
while!cur.is_null() {
let next = (*cur).next.load(Ordering::Relaxed);
let _: Box<Node<T>> = mem::transmute(cur);
cur = next;
}
}
}
}
| {
let stub = unsafe { Node::new(None) };
Queue {
head: AtomicPtr::new(stub),
tail: UnsafeCell::new(stub),
}
} | identifier_body |
pod_account.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use state::Account;
use account_db::AccountDBMut;
use ethjson;
use types::account_diff::*;
use rlp::{self, RlpStream, Stream};
#[derive(Debug, Clone, PartialEq, Eq)]
/// An account, expressed as Plain-Old-Data (hence the name).
/// Does not have a DB overlay cache, code hash or anything like that.
pub struct PodAccount {
/// The balance of the account.
pub balance: U256,
/// The nonce of the account.
pub nonce: U256,
/// The code of the account or `None` in the special case that it is unknown.
pub code: Option<Bytes>,
/// The storage of the account.
pub storage: BTreeMap<H256, H256>,
}
impl PodAccount {
/// Construct new object.
#[cfg(test)]
pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap<H256, H256>) -> PodAccount {
PodAccount { balance: balance, nonce: nonce, code: Some(code), storage: storage }
}
/// Convert Account to a PodAccount.
/// NOTE: This will silently fail unless the account is fully cached.
pub fn from_account(acc: &Account) -> PodAccount {
PodAccount {
balance: *acc.balance(),
nonce: *acc.nonce(),
storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}),
code: acc.code().map(|x| x.to_vec()),
}
}
/// Returns the RLP for this account.
pub fn rlp(&self) -> Bytes {
let mut stream = RlpStream::new_list(4);
stream.append(&self.nonce);
stream.append(&self.balance);
stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), rlp::encode(&U256::from(&**v)).to_vec())).collect()));
stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3());
stream.out()
}
/// Place additional data into given hash DB.
pub fn insert_additional(&self, db: &mut AccountDBMut, factory: &TrieFactory) {
match self.code {
Some(ref c) if!c.is_empty() => { db.insert(c); }
_ => {}
}
let mut r = H256::new();
let mut t = factory.create(db, &mut r);
for (k, v) in &self.storage {
if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) {
warn!("Encountered potential DB corruption: {}", e);
}
}
}
}
impl From<ethjson::blockchain::Account> for PodAccount {
fn from(a: ethjson::blockchain::Account) -> Self {
PodAccount {
balance: a.balance.into(),
nonce: a.nonce.into(),
code: Some(a.code.into()),
storage: a.storage.into_iter().map(|(key, value)| {
let key: U256 = key.into();
let value: U256 = value.into();
(H256::from(key), H256::from(value))
}).collect(),
}
}
}
impl From<ethjson::spec::Account> for PodAccount {
fn from(a: ethjson::spec::Account) -> Self {
PodAccount {
balance: a.balance.map_or_else(U256::zero, Into::into),
nonce: a.nonce.map_or_else(U256::zero, Into::into),
code: Some(a.code.map_or_else(Vec::new, Into::into)),
storage: a.storage.map_or_else(BTreeMap::new, |s| s.into_iter().map(|(key, value)| {
let key: U256 = key.into();
let value: U256 = value.into();
(H256::from(key), H256::from(value))
}).collect()),
}
}
}
impl fmt::Display for PodAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)",
self.balance,
self.nonce,
self.code.as_ref().map_or(0, |c| c.len()),
self.code.as_ref().map_or_else(H256::new, |c| c.sha3()),
self.storage.len(),
)
}
}
/// Determine difference between two optionally existant `Account`s. Returns None
/// if they are the same.
pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<AccountDiff> {
match (pre, post) { | balance: Diff::Born(x.balance),
nonce: Diff::Born(x.nonce),
code: Diff::Born(x.code.as_ref().expect("account is newly created; newly created accounts must be given code; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(),
}),
(Some(x), None) => Some(AccountDiff {
balance: Diff::Died(x.balance),
nonce: Diff::Died(x.nonce),
code: Diff::Died(x.code.as_ref().expect("account is deleted; only way to delete account is running SUICIDE; account must have had own code cached to make operation; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(),
}),
(Some(pre), Some(post)) => {
let storage: Vec<_> = pre.storage.keys().merge(post.storage.keys())
.filter(|k| pre.storage.get(k).unwrap_or(&H256::new())!= post.storage.get(k).unwrap_or(&H256::new()))
.collect();
let r = AccountDiff {
balance: Diff::new(pre.balance, post.balance),
nonce: Diff::new(pre.nonce, post.nonce),
code: match (pre.code.clone(), post.code.clone()) {
(Some(pre_code), Some(post_code)) => Diff::new(pre_code, post_code),
_ => Diff::Same,
},
storage: storage.into_iter().map(|k|
(k.clone(), Diff::new(
pre.storage.get(k).cloned().unwrap_or_else(H256::new),
post.storage.get(k).cloned().unwrap_or_else(H256::new)
))).collect(),
};
if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() {
None
} else {
Some(r)
}
},
_ => None,
}
}
#[cfg(test)]
mod test {
use util::*;
use types::account_diff::*;
use super::{PodAccount, diff_pod};
#[test]
fn existence() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&a)), None);
assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{
balance: Diff::Born(69.into()),
nonce: Diff::Born(0.into()),
code: Diff::Born(vec![]),
storage: map![],
}));
}
#[test]
fn basic() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Changed(69.into(), 42.into()),
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Same,
storage: map![],
}));
}
#[test]
fn code() {
let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: Some(vec![0]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Changed(vec![], vec![0]),
storage: map![],
}));
}
#[test]
fn storage() {
let a = PodAccount {
balance: 0.into(),
nonce: 0.into(),
code: Some(vec![]),
storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
};
let b = PodAccount {
balance: 0.into(),
nonce: 0.into(),
code: Some(vec![]),
storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Same,
code: Diff::Same,
storage: map![
2.into() => Diff::new(2.into(), 3.into()),
3.into() => Diff::new(3.into(), 0.into()),
4.into() => Diff::new(4.into(), 0.into()),
7.into() => Diff::new(0.into(), 7.into()),
9.into() => Diff::new(0.into(), 9.into())
],
}));
}
} | (None, Some(x)) => Some(AccountDiff { | random_line_split |
pod_account.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use state::Account;
use account_db::AccountDBMut;
use ethjson;
use types::account_diff::*;
use rlp::{self, RlpStream, Stream};
#[derive(Debug, Clone, PartialEq, Eq)]
/// An account, expressed as Plain-Old-Data (hence the name).
/// Does not have a DB overlay cache, code hash or anything like that.
pub struct PodAccount {
    /// The balance of the account.
    pub balance: U256,
    /// The nonce of the account.
    pub nonce: U256,
    /// The code of the account or `None` in the special case that it is unknown.
    pub code: Option<Bytes>,
    /// The storage of the account, as a slot-key -> value map of 256-bit words.
    pub storage: BTreeMap<H256, H256>,
}
impl PodAccount {
/// Construct new object.
#[cfg(test)]
pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap<H256, H256>) -> PodAccount {
PodAccount { balance: balance, nonce: nonce, code: Some(code), storage: storage }
}
/// Convert Account to a PodAccount.
/// NOTE: This will silently fail unless the account is fully cached.
pub fn from_account(acc: &Account) -> PodAccount {
PodAccount {
balance: *acc.balance(),
nonce: *acc.nonce(),
storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}),
code: acc.code().map(|x| x.to_vec()),
}
}
/// Returns the RLP for this account.
pub fn rlp(&self) -> Bytes {
let mut stream = RlpStream::new_list(4);
stream.append(&self.nonce);
stream.append(&self.balance);
stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), rlp::encode(&U256::from(&**v)).to_vec())).collect()));
stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3());
stream.out()
}
/// Place additional data into given hash DB.
pub fn insert_additional(&self, db: &mut AccountDBMut, factory: &TrieFactory) {
match self.code {
Some(ref c) if!c.is_empty() => { db.insert(c); }
_ => {}
}
let mut r = H256::new();
let mut t = factory.create(db, &mut r);
for (k, v) in &self.storage {
if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) {
warn!("Encountered potential DB corruption: {}", e);
}
}
}
}
impl From<ethjson::blockchain::Account> for PodAccount {
    /// Build a `PodAccount` from a JSON blockchain-test account.
    fn from(a: ethjson::blockchain::Account) -> Self {
        // Convert each JSON storage entry to a (H256, H256) pair via U256.
        let storage = a.storage
            .into_iter()
            .map(|(k, v)| {
                let k: U256 = k.into();
                let v: U256 = v.into();
                (H256::from(k), H256::from(v))
            })
            .collect();
        PodAccount {
            balance: a.balance.into(),
            nonce: a.nonce.into(),
            code: Some(a.code.into()),
            storage: storage,
        }
    }
}
impl From<ethjson::spec::Account> for PodAccount {
    /// Build a `PodAccount` from a JSON chain-spec account; absent fields
    /// fall back to zero / empty defaults.
    fn from(a: ethjson::spec::Account) -> Self {
        // Storage is optional in the spec; missing means an empty map.
        let storage = match a.storage {
            Some(s) => s.into_iter().map(|(k, v)| {
                let k: U256 = k.into();
                let v: U256 = v.into();
                (H256::from(k), H256::from(v))
            }).collect(),
            None => BTreeMap::new(),
        };
        PodAccount {
            balance: a.balance.map_or_else(U256::zero, Into::into),
            nonce: a.nonce.map_or_else(U256::zero, Into::into),
            code: Some(a.code.map_or_else(Vec::new, Into::into)),
            storage: storage,
        }
    }
}
impl fmt::Display for PodAccount {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)",
self.balance,
self.nonce,
self.code.as_ref().map_or(0, |c| c.len()),
self.code.as_ref().map_or_else(H256::new, |c| c.sha3()),
self.storage.len(),
)
}
}
/// Determine difference between two optionally existent `Account`s. Returns None
/// if they are the same.
pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<AccountDiff> {
    match (pre, post) {
        // Account was created: every field is reported as `Born`.
        (None, Some(x)) => Some(AccountDiff {
            balance: Diff::Born(x.balance),
            nonce: Diff::Born(x.nonce),
            code: Diff::Born(x.code.as_ref().expect("account is newly created; newly created accounts must be given code; all caches should remain in place; qed").clone()),
            storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(),
        }),
        // Account was deleted: every field is reported as `Died`.
        (Some(x), None) => Some(AccountDiff {
            balance: Diff::Died(x.balance),
            nonce: Diff::Died(x.nonce),
            code: Diff::Died(x.code.as_ref().expect("account is deleted; only way to delete account is running SUICIDE; account must have had own code cached to make operation; all caches should remain in place; qed").clone()),
            storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(),
        }),
        // Both exist: diff field by field.
        (Some(pre), Some(post)) => {
            // Keys present on either side whose values differ; a missing key
            // counts as the zero hash. `merge` is an iterator extension
            // assumed in scope via `util::*` — presumably itertools-style.
            let storage: Vec<_> = pre.storage.keys().merge(post.storage.keys())
                .filter(|k| pre.storage.get(k).unwrap_or(&H256::new()) != post.storage.get(k).unwrap_or(&H256::new()))
                .collect();
            let r = AccountDiff {
                balance: Diff::new(pre.balance, post.balance),
                nonce: Diff::new(pre.nonce, post.nonce),
                // Code can only be compared when cached on both sides;
                // otherwise treat it as unchanged.
                code: match (pre.code.clone(), post.code.clone()) {
                    (Some(pre_code), Some(post_code)) => Diff::new(pre_code, post_code),
                    _ => Diff::Same,
                },
                storage: storage.into_iter().map(|k|
                    (k.clone(), Diff::new(
                        pre.storage.get(k).cloned().unwrap_or_else(H256::new),
                        post.storage.get(k).cloned().unwrap_or_else(H256::new)
                    ))).collect(),
            };
            // A diff where nothing changed collapses to `None`.
            if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() {
                None
            } else {
                Some(r)
            }
        },
        // (None, None): nothing to compare.
        _ => None,
    }
}
#[cfg(test)]
mod test {
    use util::*;
    use types::account_diff::*;
    use super::{PodAccount, diff_pod};

    /// Identical accounts diff to `None`; a newly-born account reports
    /// every field as `Born`.
    #[test]
    fn existence() {
        let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
        assert_eq!(diff_pod(Some(&a), Some(&a)), None);
        assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{
            balance: Diff::Born(69.into()),
            nonce: Diff::Born(0.into()),
            code: Diff::Born(vec![]),
            storage: map![],
        }));
    }

    /// Balance/nonce changes show as `Changed`; identical code stays `Same`.
    #[test]
    fn basic() {
        let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
        let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: Some(vec![]), storage: map![]};
        assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
            balance: Diff::Changed(69.into(), 42.into()),
            nonce: Diff::Changed(0.into(), 1.into()),
            code: Diff::Same,
            storage: map![],
        }));
    }

    /// A change in code (empty -> [0]) is reported as `Changed`.
    #[test]
    fn code() {
        let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
        let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: Some(vec![0]), storage: map![]};
        assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
            balance: Diff::Same,
            nonce: Diff::Changed(0.into(), 1.into()),
            code: Diff::Changed(vec![], vec![0]),
            storage: map![],
        }));
    }

    /// Storage diffs include changed, zeroed and newly-set slots, and omit
    /// slots that are equal (including zero-on-both-sides) in a and b.
    #[test]
    fn storage() {
        let a = PodAccount {
            balance: 0.into(),
            nonce: 0.into(),
            code: Some(vec![]),
            storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
        };
        let b = PodAccount {
            balance: 0.into(),
            nonce: 0.into(),
            code: Some(vec![]),
            storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
        };
        assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
            balance: Diff::Same,
            nonce: Diff::Same,
            code: Diff::Same,
            storage: map![
                2.into() => Diff::new(2.into(), 3.into()),
                3.into() => Diff::new(3.into(), 0.into()),
                4.into() => Diff::new(4.into(), 0.into()),
                7.into() => Diff::new(0.into(), 7.into()),
                9.into() => Diff::new(0.into(), 9.into())
            ],
        }));
    }
}
| fmt | identifier_name |
pod_account.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use state::Account;
use account_db::AccountDBMut;
use ethjson;
use types::account_diff::*;
use rlp::{self, RlpStream, Stream};
#[derive(Debug, Clone, PartialEq, Eq)]
/// An account, expressed as Plain-Old-Data (hence the name).
/// Does not have a DB overlay cache, code hash or anything like that.
pub struct PodAccount {
/// The balance of the account.
pub balance: U256,
/// The nonce of the account.
pub nonce: U256,
/// The code of the account or `None` in the special case that it is unknown.
pub code: Option<Bytes>,
/// The storage of the account.
pub storage: BTreeMap<H256, H256>,
}
impl PodAccount {
/// Construct new object.
#[cfg(test)]
pub fn new(balance: U256, nonce: U256, code: Bytes, storage: BTreeMap<H256, H256>) -> PodAccount {
PodAccount { balance: balance, nonce: nonce, code: Some(code), storage: storage }
}
/// Convert Account to a PodAccount.
/// NOTE: This will silently fail unless the account is fully cached.
pub fn from_account(acc: &Account) -> PodAccount |
/// Returns the RLP for this account.
pub fn rlp(&self) -> Bytes {
let mut stream = RlpStream::new_list(4);
stream.append(&self.nonce);
stream.append(&self.balance);
stream.append(&sec_trie_root(self.storage.iter().map(|(k, v)| (k.to_vec(), rlp::encode(&U256::from(&**v)).to_vec())).collect()));
stream.append(&self.code.as_ref().unwrap_or(&vec![]).sha3());
stream.out()
}
/// Place additional data into given hash DB.
pub fn insert_additional(&self, db: &mut AccountDBMut, factory: &TrieFactory) {
match self.code {
Some(ref c) if!c.is_empty() => { db.insert(c); }
_ => {}
}
let mut r = H256::new();
let mut t = factory.create(db, &mut r);
for (k, v) in &self.storage {
if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) {
warn!("Encountered potential DB corruption: {}", e);
}
}
}
}
impl From<ethjson::blockchain::Account> for PodAccount {
fn from(a: ethjson::blockchain::Account) -> Self {
PodAccount {
balance: a.balance.into(),
nonce: a.nonce.into(),
code: Some(a.code.into()),
storage: a.storage.into_iter().map(|(key, value)| {
let key: U256 = key.into();
let value: U256 = value.into();
(H256::from(key), H256::from(value))
}).collect(),
}
}
}
impl From<ethjson::spec::Account> for PodAccount {
fn from(a: ethjson::spec::Account) -> Self {
PodAccount {
balance: a.balance.map_or_else(U256::zero, Into::into),
nonce: a.nonce.map_or_else(U256::zero, Into::into),
code: Some(a.code.map_or_else(Vec::new, Into::into)),
storage: a.storage.map_or_else(BTreeMap::new, |s| s.into_iter().map(|(key, value)| {
let key: U256 = key.into();
let value: U256 = value.into();
(H256::from(key), H256::from(value))
}).collect()),
}
}
}
impl fmt::Display for PodAccount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)",
self.balance,
self.nonce,
self.code.as_ref().map_or(0, |c| c.len()),
self.code.as_ref().map_or_else(H256::new, |c| c.sha3()),
self.storage.len(),
)
}
}
/// Determine difference between two optionally existant `Account`s. Returns None
/// if they are the same.
pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option<AccountDiff> {
match (pre, post) {
(None, Some(x)) => Some(AccountDiff {
balance: Diff::Born(x.balance),
nonce: Diff::Born(x.nonce),
code: Diff::Born(x.code.as_ref().expect("account is newly created; newly created accounts must be given code; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Born(v.clone()))).collect(),
}),
(Some(x), None) => Some(AccountDiff {
balance: Diff::Died(x.balance),
nonce: Diff::Died(x.nonce),
code: Diff::Died(x.code.as_ref().expect("account is deleted; only way to delete account is running SUICIDE; account must have had own code cached to make operation; all caches should remain in place; qed").clone()),
storage: x.storage.iter().map(|(k, v)| (k.clone(), Diff::Died(v.clone()))).collect(),
}),
(Some(pre), Some(post)) => {
let storage: Vec<_> = pre.storage.keys().merge(post.storage.keys())
.filter(|k| pre.storage.get(k).unwrap_or(&H256::new())!= post.storage.get(k).unwrap_or(&H256::new()))
.collect();
let r = AccountDiff {
balance: Diff::new(pre.balance, post.balance),
nonce: Diff::new(pre.nonce, post.nonce),
code: match (pre.code.clone(), post.code.clone()) {
(Some(pre_code), Some(post_code)) => Diff::new(pre_code, post_code),
_ => Diff::Same,
},
storage: storage.into_iter().map(|k|
(k.clone(), Diff::new(
pre.storage.get(k).cloned().unwrap_or_else(H256::new),
post.storage.get(k).cloned().unwrap_or_else(H256::new)
))).collect(),
};
if r.balance.is_same() && r.nonce.is_same() && r.code.is_same() && r.storage.is_empty() {
None
} else {
Some(r)
}
},
_ => None,
}
}
#[cfg(test)]
mod test {
use util::*;
use types::account_diff::*;
use super::{PodAccount, diff_pod};
#[test]
fn existence() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&a)), None);
assert_eq!(diff_pod(None, Some(&a)), Some(AccountDiff{
balance: Diff::Born(69.into()),
nonce: Diff::Born(0.into()),
code: Diff::Born(vec![]),
storage: map![],
}));
}
#[test]
fn basic() {
let a = PodAccount{balance: 69.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 42.into(), nonce: 1.into(), code: Some(vec![]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Changed(69.into(), 42.into()),
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Same,
storage: map![],
}));
}
#[test]
fn code() {
let a = PodAccount{balance: 0.into(), nonce: 0.into(), code: Some(vec![]), storage: map![]};
let b = PodAccount{balance: 0.into(), nonce: 1.into(), code: Some(vec![0]), storage: map![]};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Changed(0.into(), 1.into()),
code: Diff::Changed(vec![], vec![0]),
storage: map![],
}));
}
#[test]
fn storage() {
let a = PodAccount {
balance: 0.into(),
nonce: 0.into(),
code: Some(vec![]),
storage: map_into![1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 0, 6 => 0, 7 => 0]
};
let b = PodAccount {
balance: 0.into(),
nonce: 0.into(),
code: Some(vec![]),
storage: map_into![1 => 1, 2 => 3, 3 => 0, 5 => 0, 7 => 7, 8 => 0, 9 => 9]
};
assert_eq!(diff_pod(Some(&a), Some(&b)), Some(AccountDiff {
balance: Diff::Same,
nonce: Diff::Same,
code: Diff::Same,
storage: map![
2.into() => Diff::new(2.into(), 3.into()),
3.into() => Diff::new(3.into(), 0.into()),
4.into() => Diff::new(4.into(), 0.into()),
7.into() => Diff::new(0.into(), 7.into()),
9.into() => Diff::new(0.into(), 9.into())
],
}));
}
}
| {
PodAccount {
balance: *acc.balance(),
nonce: *acc.nonce(),
storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}),
code: acc.code().map(|x| x.to_vec()),
}
} | identifier_body |
walk.rs | use super::tree::*;
use super::super::Graph;
use super::super::dominators::Dominators;
use super::super::node_vec::NodeVec;
use std::collections::HashSet;
use std::default::Default;
/// Per-node progress marker shared by the two walks over the graph.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum NodeState {
    /// Not yet reached by the head walk.
    NotYetStarted,
    /// On the head-walk DFS stack; carries the node's `LoopId` once it has
    /// been promoted to a loop head.
    InProgress(Option<LoopId>),
    /// Head walk complete for this node; not yet queued for the exit walk.
    FinishedHeadWalk,
    /// Pushed onto the exit-walk work list.
    EnqueuedExitWalk,
}
use self::NodeState::*;
impl Default for NodeState {
fn default() -> Self {
NotYetStarted
}
}
/// Builder that computes a `LoopTree` for `graph` via two depth-first walks.
pub struct LoopTreeWalk<'walk, G: Graph + 'walk> {
    graph: &'walk G,
    // Dominator information; used to order loop heads by nesting depth.
    dominators: &'walk Dominators<G>,
    // Walk progress, one `NodeState` per graph node.
    state: NodeVec<G, NodeState>,
    // The tree being built.
    loop_tree: LoopTree<G>,
}
impl<'walk, G: Graph> LoopTreeWalk<'walk, G> {
/// Create a walk over `graph` using precomputed `dominators`.
pub fn new(graph: &'walk G,
           dominators: &'walk Dominators<G>)
           -> Self {
    LoopTreeWalk {
        graph: graph,
        dominators: dominators,
        state: NodeVec::from_default(graph),
        loop_tree: LoopTree::new(graph),
    }
}
/// Run both walks from the start node and return the finished tree.
pub fn compute_loop_tree(mut self) -> LoopTree<G> {
    // Pass 1: find loop heads and parent links.
    self.head_walk(self.graph.start_node());
    // Pass 2: record loop exits.
    self.exit_walk(self.graph.start_node());
    self.loop_tree
}
/// First walk: identify loop heads and loop parents. This uses a
/// variant of Tarjan's SCC algorithm. Basically, we do a
/// depth-first search. Each time we encounter a backedge, the
/// target of that backedge is a loop-head, so we make a
/// corresponding loop, if we haven't done so already. We then track
/// the set of loops that `node` was able to reach via backedges.
/// The innermost such loop is the loop-id of `node`, and we then
/// return the set for use by the predecessor of `node`.
fn head_walk(&mut self,
             node: G::Node)
             -> HashSet<LoopId> {
    assert_eq!(self.state[node], NotYetStarted);
    self.state[node] = InProgress(None);
    // Walk our successors and collect the set of backedges they
    // reach.
    let mut set = HashSet::new();
    for successor in self.graph.successors(node) {
        match self.state[successor] {
            NotYetStarted => {
                // Tree edge: recurse and inherit the loops it reaches.
                set.extend(self.head_walk(successor));
            }
            InProgress(opt_loop_id) => {
                // Backedge. Successor is a loop-head.
                if let Some(loop_id) = opt_loop_id {
                    set.insert(loop_id);
                } else {
                    set.insert(self.promote_to_loop_head(successor));
                }
            }
            FinishedHeadWalk => {
                // Cross edge.
            }
            EnqueuedExitWalk => {
                // The exit walk only starts after the head walk completes.
                unreachable!()
            }
        }
    }
    self.state[node] = FinishedHeadWalk;
    // Assign a loop-id to this node. This will be the innermost
    // loop that we could reach.
    match self.innermost(&set) {
        Some(loop_id) => {
            self.loop_tree.set_loop_id(node, Some(loop_id));
            // Check if we are the loop head. In that case, we
            // should remove ourselves from the returned set,
            // since our parent in the spanning tree is not a
            // member of this loop.
            let loop_head = self.loop_tree.loop_head(loop_id);
            if node == loop_head {
                set.remove(&loop_id);
                // Now the next-innermost loop is the parent of this loop.
                let parent_loop_id = self.innermost(&set);
                self.loop_tree.set_parent(loop_id, parent_loop_id);
            }
        }
        None => {
            // No backedges reached: node is not inside any loop.
            assert!(set.is_empty());
            assert!(self.loop_tree.loop_id(node).is_none()); // all none by default
        }
    }
    set
}
/// Second walk: record loop exits. Iterative worklist traversal over
/// nodes already finished by the head walk.
fn exit_walk(&mut self, node: G::Node) {
    let mut stack = vec![node];
    assert_eq!(self.state[node], FinishedHeadWalk);
    self.state[node] = EnqueuedExitWalk;
    while let Some(node) = stack.pop() {
        // For each successor, check what loop they are in. If any of
        // them are in a loop outer to ours -- or not in a loop at all
        // -- those are exits from this inner loop.
        if let Some(loop_id) = self.loop_tree.loop_id(node) {
            for successor in self.graph.successors(node) {
                self.update_loop_exit(loop_id, successor);
            }
        }
        // Visit our successors.
        for successor in self.graph.successors(node) {
            match self.state[successor] {
                NotYetStarted | InProgress(_) => {
                    // The head walk has already visited every reachable node.
                    unreachable!();
                }
                FinishedHeadWalk => {
                    stack.push(successor);
                    self.state[successor] = EnqueuedExitWalk;
                }
                EnqueuedExitWalk => {
                    // Already queued; nothing to do.
                }
            }
        }
    }
}
/// Mark `node` (currently in progress, i.e. on the DFS stack) as a loop
/// head, allocating a fresh `LoopId` for it and recording it in the state.
fn promote_to_loop_head(&mut self,
                        node: G::Node)
                        -> LoopId {
    assert_eq!(self.state[node], InProgress(None));
    let loop_id = self.loop_tree.new_loop(node);
    self.state[node] = InProgress(Some(loop_id));
    loop_id
}
/// Of the loops in `set`, return the innermost one (the loop whose head
/// is dominated by the heads of all the others), or `None` if the set
/// is empty.
fn innermost(&self, set: &HashSet<LoopId>) -> Option<LoopId> {
    set.iter().fold(None, |best, &candidate| {
        match best {
            // Keep the current best unless the candidate nests inside it.
            Some(current) if !self.is_inner_loop_of(candidate, current) => Some(current),
            // First element, or candidate is more deeply nested.
            _ => Some(candidate),
        }
    })
}
/// True if loop `l1` nests inside loop `l2`, judged by whether the head
/// of `l1` is dominated by the head of `l2`. The two loops must be
/// distinct.
fn is_inner_loop_of(&self, l1: LoopId, l2: LoopId) -> bool {
    let h1 = self.loop_tree.loop_head(l1);
    let h2 = self.loop_tree.loop_head(l2);
    assert!(h1 != h2);
    if self.dominators.is_dominated_by(h1, h2) {
        true
    } else {
        // These two must have a dominance relationship or else
        // the graph is not reducible.
        assert!(self.dominators.is_dominated_by(h2, h1));
        false
    }
}
/// Some node that is in loop `loop_id` has the successor
/// `successor`. Check if `successor` is not in the loop
/// `loop_id` and update loop exits appropriately.
fn | (&mut self, mut loop_id: LoopId, successor: G::Node) {
match self.loop_tree.loop_id(successor) {
Some(successor_loop_id) => {
// If the successor's loop is an outer-loop of ours,
// then this is an exit from our loop and all
// intervening loops.
if self.loop_tree.parents(loop_id).any(|p| p == successor_loop_id) {
while loop_id!= successor_loop_id {
self.loop_tree.push_loop_exit(loop_id, successor);
loop_id = self.loop_tree.parent(loop_id).unwrap();
}
}
}
None => {
// Successor is not in a loop, so this is an exit from
// `loop_id` and all of its parents.
let mut p = Some(loop_id);
while let Some(l) = p {
self.loop_tree.push_loop_exit(l, successor);
p = self.loop_tree.parent(l);
}
}
}
}
}
| update_loop_exit | identifier_name |
walk.rs | use super::tree::*;
use super::super::Graph;
use super::super::dominators::Dominators;
use super::super::node_vec::NodeVec;
use std::collections::HashSet;
use std::default::Default;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum NodeState {
NotYetStarted,
InProgress(Option<LoopId>),
FinishedHeadWalk,
EnqueuedExitWalk,
}
use self::NodeState::*;
impl Default for NodeState {
fn default() -> Self {
NotYetStarted
}
}
pub struct LoopTreeWalk<'walk, G: Graph + 'walk> {
graph: &'walk G,
dominators: &'walk Dominators<G>,
state: NodeVec<G, NodeState>,
loop_tree: LoopTree<G>,
}
impl<'walk, G: Graph> LoopTreeWalk<'walk, G> {
pub fn new(graph: &'walk G,
dominators: &'walk Dominators<G>)
-> Self {
LoopTreeWalk {
graph: graph,
dominators: dominators,
state: NodeVec::from_default(graph),
loop_tree: LoopTree::new(graph),
}
}
pub fn compute_loop_tree(mut self) -> LoopTree<G> {
self.head_walk(self.graph.start_node());
self.exit_walk(self.graph.start_node());
self.loop_tree
}
/// First walk: identify loop heads and loop parents. This uses a
/// variant of Tarjan's SCC algorithm. Basically, we do a
/// depth-first search. Each time we encounter a backedge, the
/// target of that backedge is a loop-head, so we make a
/// corresponding loop, if we haven't done so already. We then track
/// the set of loops that `node` was able to reach via backedges.
/// The innermost such loop is the loop-id of `node`, and we then
/// return the set for use by the predecessor of `node`.
/// (The body of the `EnqueuedExitWalk` arm was elided in the source;
/// restored as `unreachable!()` — the exit walk cannot have started yet.)
fn head_walk(&mut self,
             node: G::Node)
             -> HashSet<LoopId> {
    assert_eq!(self.state[node], NotYetStarted);
    self.state[node] = InProgress(None);
    // Walk our successors and collect the set of backedges they
    // reach.
    let mut set = HashSet::new();
    for successor in self.graph.successors(node) {
        match self.state[successor] {
            NotYetStarted => {
                set.extend(self.head_walk(successor));
            }
            InProgress(opt_loop_id) => {
                // Backedge. Successor is a loop-head.
                if let Some(loop_id) = opt_loop_id {
                    set.insert(loop_id);
                } else {
                    set.insert(self.promote_to_loop_head(successor));
                }
            }
            FinishedHeadWalk => {
                // Cross edge.
            }
            EnqueuedExitWalk => {
                unreachable!()
            }
        }
    }
    self.state[node] = FinishedHeadWalk;
    // Assign a loop-id to this node. This will be the innermost
    // loop that we could reach.
    match self.innermost(&set) {
        Some(loop_id) => {
            self.loop_tree.set_loop_id(node, Some(loop_id));
            // Check if we are the loop head. In that case, we
            // should remove ourselves from the returned set,
            // since our parent in the spanning tree is not a
            // member of this loop.
            let loop_head = self.loop_tree.loop_head(loop_id);
            if node == loop_head {
                set.remove(&loop_id);
                // Now the next-innermost loop is the parent of this loop.
                let parent_loop_id = self.innermost(&set);
                self.loop_tree.set_parent(loop_id, parent_loop_id);
            }
        }
        None => {
            assert!(set.is_empty());
            assert!(self.loop_tree.loop_id(node).is_none()); // all none by default
        }
    }
    set
}
fn exit_walk(&mut self, node: G::Node) {
let mut stack = vec![node];
assert_eq!(self.state[node], FinishedHeadWalk);
self.state[node] = EnqueuedExitWalk;
while let Some(node) = stack.pop() {
// For each successor, check what loop they are in. If any of
// them are in a loop outer to ours -- or not in a loop at all
// -- those are exits from this inner loop.
if let Some(loop_id) = self.loop_tree.loop_id(node) {
for successor in self.graph.successors(node) {
self.update_loop_exit(loop_id, successor);
}
}
// Visit our successors.
for successor in self.graph.successors(node) {
match self.state[successor] {
NotYetStarted | InProgress(_) => {
unreachable!();
}
FinishedHeadWalk => {
stack.push(successor);
self.state[successor] = EnqueuedExitWalk;
}
EnqueuedExitWalk => {
}
}
}
}
}
fn promote_to_loop_head(&mut self,
node: G::Node)
-> LoopId {
assert_eq!(self.state[node], InProgress(None));
let loop_id = self.loop_tree.new_loop(node);
self.state[node] = InProgress(Some(loop_id));
loop_id
}
fn innermost(&self, set: &HashSet<LoopId>) -> Option<LoopId> {
let mut innermost = None;
for &loop_id1 in set {
if let Some(loop_id2) = innermost {
if self.is_inner_loop_of(loop_id1, loop_id2) {
innermost = Some(loop_id1);
}
} else {
innermost = Some(loop_id1);
}
}
innermost
}
fn is_inner_loop_of(&self, l1: LoopId, l2: LoopId) -> bool {
let h1 = self.loop_tree.loop_head(l1);
let h2 = self.loop_tree.loop_head(l2);
assert!(h1!= h2);
if self.dominators.is_dominated_by(h1, h2) {
true
} else {
// These two must have a dominance relationship or else
// the graph is not reducible.
assert!(self.dominators.is_dominated_by(h2, h1));
false
}
}
/// Some node that is in loop `loop_id` has the successor
/// `successor`. Check if `successor` is not in the loop
/// `loop_id` and update loop exits appropriately.
fn update_loop_exit(&mut self, mut loop_id: LoopId, successor: G::Node) {
match self.loop_tree.loop_id(successor) {
Some(successor_loop_id) => {
// If the successor's loop is an outer-loop of ours,
// then this is an exit from our loop and all
// intervening loops.
if self.loop_tree.parents(loop_id).any(|p| p == successor_loop_id) {
while loop_id!= successor_loop_id {
self.loop_tree.push_loop_exit(loop_id, successor);
loop_id = self.loop_tree.parent(loop_id).unwrap();
}
}
}
None => {
// Successor is not in a loop, so this is an exit from
// `loop_id` and all of its parents.
let mut p = Some(loop_id);
while let Some(l) = p {
self.loop_tree.push_loop_exit(l, successor);
p = self.loop_tree.parent(l);
}
}
}
}
}
| {
unreachable!()
} | conditional_block |
walk.rs | use super::tree::*;
use super::super::Graph;
use super::super::dominators::Dominators;
use super::super::node_vec::NodeVec;
use std::collections::HashSet;
use std::default::Default;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum NodeState {
NotYetStarted,
InProgress(Option<LoopId>),
FinishedHeadWalk,
EnqueuedExitWalk,
}
use self::NodeState::*;
impl Default for NodeState {
fn default() -> Self {
NotYetStarted
}
}
pub struct LoopTreeWalk<'walk, G: Graph + 'walk> {
graph: &'walk G,
dominators: &'walk Dominators<G>,
state: NodeVec<G, NodeState>,
loop_tree: LoopTree<G>,
}
impl<'walk, G: Graph> LoopTreeWalk<'walk, G> {
pub fn new(graph: &'walk G,
dominators: &'walk Dominators<G>)
-> Self {
LoopTreeWalk {
graph: graph,
dominators: dominators,
state: NodeVec::from_default(graph),
loop_tree: LoopTree::new(graph),
}
}
pub fn compute_loop_tree(mut self) -> LoopTree<G> {
self.head_walk(self.graph.start_node());
self.exit_walk(self.graph.start_node());
self.loop_tree
}
/// First walk: identify loop heads and loop parents. This uses a
/// variant of Tarjan's SCC algorithm. Basically, we do a
/// depth-first search. Each time we encounter a backedge, the
/// target of that backedge is a loop-head, so we make a
/// corresponding loop, if we haven't done so already. We then track
/// the set of loops that `node` was able to reach via backedges.
/// The innermost such loop is the loop-id of `node`, and we then
/// return the set for use by the predecessor of `node`.
fn head_walk(&mut self,
node: G::Node)
-> HashSet<LoopId> {
assert_eq!(self.state[node], NotYetStarted);
self.state[node] = InProgress(None);
// Walk our successors and collect the set of backedges they
// reach.
let mut set = HashSet::new();
for successor in self.graph.successors(node) {
match self.state[successor] {
NotYetStarted => {
set.extend(self.head_walk(successor));
}
InProgress(opt_loop_id) => {
// Backedge. Successor is a loop-head.
if let Some(loop_id) = opt_loop_id {
set.insert(loop_id);
} else {
set.insert(self.promote_to_loop_head(successor));
}
}
FinishedHeadWalk => {
// Cross edge.
}
EnqueuedExitWalk => {
unreachable!()
}
}
} | match self.innermost(&set) {
Some(loop_id) => {
self.loop_tree.set_loop_id(node, Some(loop_id));
// Check if we are the loop head. In that case, we
// should remove ourselves from the returned set,
// since our parent in the spanning tree is not a
// member of this loop.
let loop_head = self.loop_tree.loop_head(loop_id);
if node == loop_head {
set.remove(&loop_id);
// Now the next-innermost loop is the parent of this loop.
let parent_loop_id = self.innermost(&set);
self.loop_tree.set_parent(loop_id, parent_loop_id);
}
}
None => {
assert!(set.is_empty());
assert!(self.loop_tree.loop_id(node).is_none()); // all none by default
}
}
set
}
fn exit_walk(&mut self, node: G::Node) {
let mut stack = vec![node];
assert_eq!(self.state[node], FinishedHeadWalk);
self.state[node] = EnqueuedExitWalk;
while let Some(node) = stack.pop() {
// For each successor, check what loop they are in. If any of
// them are in a loop outer to ours -- or not in a loop at all
// -- those are exits from this inner loop.
if let Some(loop_id) = self.loop_tree.loop_id(node) {
for successor in self.graph.successors(node) {
self.update_loop_exit(loop_id, successor);
}
}
// Visit our successors.
for successor in self.graph.successors(node) {
match self.state[successor] {
NotYetStarted | InProgress(_) => {
unreachable!();
}
FinishedHeadWalk => {
stack.push(successor);
self.state[successor] = EnqueuedExitWalk;
}
EnqueuedExitWalk => {
}
}
}
}
}
fn promote_to_loop_head(&mut self,
node: G::Node)
-> LoopId {
assert_eq!(self.state[node], InProgress(None));
let loop_id = self.loop_tree.new_loop(node);
self.state[node] = InProgress(Some(loop_id));
loop_id
}
fn innermost(&self, set: &HashSet<LoopId>) -> Option<LoopId> {
let mut innermost = None;
for &loop_id1 in set {
if let Some(loop_id2) = innermost {
if self.is_inner_loop_of(loop_id1, loop_id2) {
innermost = Some(loop_id1);
}
} else {
innermost = Some(loop_id1);
}
}
innermost
}
fn is_inner_loop_of(&self, l1: LoopId, l2: LoopId) -> bool {
let h1 = self.loop_tree.loop_head(l1);
let h2 = self.loop_tree.loop_head(l2);
assert!(h1!= h2);
if self.dominators.is_dominated_by(h1, h2) {
true
} else {
// These two must have a dominance relationship or else
// the graph is not reducible.
assert!(self.dominators.is_dominated_by(h2, h1));
false
}
}
/// Some node that is in loop `loop_id` has the successor
/// `successor`. Check if `successor` is not in the loop
/// `loop_id` and update loop exits appropriately.
fn update_loop_exit(&mut self, mut loop_id: LoopId, successor: G::Node) {
match self.loop_tree.loop_id(successor) {
Some(successor_loop_id) => {
// If the successor's loop is an outer-loop of ours,
// then this is an exit from our loop and all
// intervening loops.
if self.loop_tree.parents(loop_id).any(|p| p == successor_loop_id) {
while loop_id!= successor_loop_id {
self.loop_tree.push_loop_exit(loop_id, successor);
loop_id = self.loop_tree.parent(loop_id).unwrap();
}
}
}
None => {
// Successor is not in a loop, so this is an exit from
// `loop_id` and all of its parents.
let mut p = Some(loop_id);
while let Some(l) = p {
self.loop_tree.push_loop_exit(l, successor);
p = self.loop_tree.parent(l);
}
}
}
}
} |
self.state[node] = FinishedHeadWalk;
// Assign a loop-id to this node. This will be the innermost
// loop that we could reach. | random_line_split |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)]
use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector};
use trans::cabi::{ArgType, FnType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt(/* bitwidth */ u64),
/// Data that can appear in the upper half of an SSE register.
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.is_empty() { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.is_empty() { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return (off + a - 1) / a * a;
}
fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => (ty.int_width() as usize + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: usize,
newv: RegClass) {
if cls[i] == newv { return }
let to_write = match (cls[i], newv) {
(NoClass, _) => newv,
(_, NoClass) => return,
(Memory, _) |
(_, Memory) => Memory,
(Int, _) |
(_, Int) => Int,
(X87, _) |
(X87Up, _) |
(ComplexX87, _) |
(_, X87) |
(_, X87Up) |
(_, ComplexX87) => Memory,
(SSEFv, SSEUp) |
(SSEFs, SSEUp) |
(SSEDv, SSEUp) |
(SSEDs, SSEUp) |
(SSEInt(_), SSEUp) => return,
(_, _) => newv
};
cls[i] = to_write;
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: usize,
off: usize,
packed: bool) {
let mut field_off = off;
for ty in tys {
if!packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn | (ty: Type,
cls: &mut [RegClass], ix: usize,
off: usize) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign!= 0 {
let mut i = off / 8;
let e = (off + t_size + 7) / 8;
while i < e {
unify(cls, ix + i, Memory);
i += 1;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8, Int);
}
Float => {
if off % 8 == 4 {
unify(cls, ix + off / 8, SSEFv);
} else {
unify(cls, ix + off / 8, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8, SSEDs);
}
Struct => {
classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1;
}
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut reg = match elt.kind() {
Integer => SSEInt(elt.int_width()),
Float => SSEFv,
Double => SSEDv,
_ => panic!("classify: unhandled vector element type")
};
let mut i = 0;
while i < len {
unify(cls, ix + (off + i * eltsz) / 8, reg);
// everything after the first one is the upper
// half of a register.
reg = SSEUp;
i += 1;
}
}
_ => panic!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
if cls[i].is_sse() {
i += 1;
while i < e {
if cls[i]!= SSEUp {
all_mem(cls);
return;
}
i += 1;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i!= e && cls[i] == SSEUp { i += 1; }
} else if cls[i] == X87 {
i += 1;
while i!= e && cls[i] == X87Up { i += 1; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = vec![NoClass; words];
if words > 4 {
all_mem(&mut cls);
return cls;
}
classify(ty, &mut cls, 0, 0);
fixup(ty, &mut cls);
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> usize {
let mut len = 1;
for c in cls {
if *c!= SSEUp {
break;
}
len += 1;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv | SSEDv | SSEInt(_) => {
let (elts_per_word, elt_ty) = match cls[i] {
SSEFv => (2, Type::f32(ccx)),
SSEDv => (1, Type::f64(ccx)),
SSEInt(bits) => {
assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
"llreg_ty: unsupported SSEInt width {}", bits);
(64 / bits, Type::ix(ccx, bits))
}
_ => unreachable!(),
};
let vec_len = llvec_len(&cls[i + 1..]);
let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => panic!("llregtype: unhandled class")
}
i += 1;
}
if tys.len() == 1 && tys[0].kind() == Vector {
// if the type contains only a vector, pass it as that vector.
tys[0]
} else {
Type::struct_(ccx, &tys, false)
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type,
is_mem_cls: F,
ind_attr: Attribute)
-> ArgType where
F: FnOnce(&[RegClass]) -> bool,
{
if!ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(&cls) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, &cls)),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), Attribute::ByVal);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), Attribute::StructRet)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| classify | identifier_name |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)]
use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector};
use trans::cabi::{ArgType, FnType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt(/* bitwidth */ u64),
/// Data that can appear in the upper half of an SSE register.
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.is_empty() { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.is_empty() { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return (off + a - 1) / a * a;
}
fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => (ty.int_width() as usize + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: usize,
newv: RegClass) {
if cls[i] == newv { return }
let to_write = match (cls[i], newv) {
(NoClass, _) => newv,
(_, NoClass) => return,
(Memory, _) |
(_, Memory) => Memory,
(Int, _) |
(_, Int) => Int,
(X87, _) |
(X87Up, _) |
(ComplexX87, _) |
(_, X87) |
(_, X87Up) |
(_, ComplexX87) => Memory,
(SSEFv, SSEUp) |
(SSEFs, SSEUp) |
(SSEDv, SSEUp) |
(SSEDs, SSEUp) |
(SSEInt(_), SSEUp) => return,
(_, _) => newv
};
cls[i] = to_write;
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: usize,
off: usize,
packed: bool) {
let mut field_off = off;
for ty in tys {
if!packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: usize,
off: usize) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign!= 0 {
let mut i = off / 8;
let e = (off + t_size + 7) / 8;
while i < e {
unify(cls, ix + i, Memory);
i += 1;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8, Int);
}
Float => {
if off % 8 == 4 {
unify(cls, ix + off / 8, SSEFv);
} else {
unify(cls, ix + off / 8, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8, SSEDs);
}
Struct => {
classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed()); | Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1;
}
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut reg = match elt.kind() {
Integer => SSEInt(elt.int_width()),
Float => SSEFv,
Double => SSEDv,
_ => panic!("classify: unhandled vector element type")
};
let mut i = 0;
while i < len {
unify(cls, ix + (off + i * eltsz) / 8, reg);
// everything after the first one is the upper
// half of a register.
reg = SSEUp;
i += 1;
}
}
_ => panic!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
if cls[i].is_sse() {
i += 1;
while i < e {
if cls[i]!= SSEUp {
all_mem(cls);
return;
}
i += 1;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i!= e && cls[i] == SSEUp { i += 1; }
} else if cls[i] == X87 {
i += 1;
while i!= e && cls[i] == X87Up { i += 1; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = vec![NoClass; words];
if words > 4 {
all_mem(&mut cls);
return cls;
}
classify(ty, &mut cls, 0, 0);
fixup(ty, &mut cls);
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> usize {
let mut len = 1;
for c in cls {
if *c!= SSEUp {
break;
}
len += 1;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv | SSEDv | SSEInt(_) => {
let (elts_per_word, elt_ty) = match cls[i] {
SSEFv => (2, Type::f32(ccx)),
SSEDv => (1, Type::f64(ccx)),
SSEInt(bits) => {
assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
"llreg_ty: unsupported SSEInt width {}", bits);
(64 / bits, Type::ix(ccx, bits))
}
_ => unreachable!(),
};
let vec_len = llvec_len(&cls[i + 1..]);
let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => panic!("llregtype: unhandled class")
}
i += 1;
}
if tys.len() == 1 && tys[0].kind() == Vector {
// if the type contains only a vector, pass it as that vector.
tys[0]
} else {
Type::struct_(ccx, &tys, false)
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type,
is_mem_cls: F,
ind_attr: Attribute)
-> ArgType where
F: FnOnce(&[RegClass]) -> bool,
{
if!ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(&cls) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, &cls)),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), Attribute::ByVal);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), Attribute::StructRet)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} | } | random_line_split |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)]
use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector};
use trans::cabi::{ArgType, FnType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt(/* bitwidth */ u64),
/// Data that can appear in the upper half of an SSE register.
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.is_empty() { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.is_empty() { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return (off + a - 1) / a * a;
}
fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => (ty.int_width() as usize + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: usize,
newv: RegClass) {
if cls[i] == newv { return }
let to_write = match (cls[i], newv) {
(NoClass, _) => newv,
(_, NoClass) => return,
(Memory, _) |
(_, Memory) => Memory,
(Int, _) |
(_, Int) => Int,
(X87, _) |
(X87Up, _) |
(ComplexX87, _) |
(_, X87) |
(_, X87Up) |
(_, ComplexX87) => Memory,
(SSEFv, SSEUp) |
(SSEFs, SSEUp) |
(SSEDv, SSEUp) |
(SSEDs, SSEUp) |
(SSEInt(_), SSEUp) => return,
(_, _) => newv
};
cls[i] = to_write;
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: usize,
off: usize,
packed: bool) {
let mut field_off = off;
for ty in tys {
if!packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: usize,
off: usize) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign!= 0 {
let mut i = off / 8;
let e = (off + t_size + 7) / 8;
while i < e {
unify(cls, ix + i, Memory);
i += 1;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8, Int);
}
Float => {
if off % 8 == 4 {
unify(cls, ix + off / 8, SSEFv);
} else {
unify(cls, ix + off / 8, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8, SSEDs);
}
Struct => {
classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1;
}
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut reg = match elt.kind() {
Integer => SSEInt(elt.int_width()),
Float => SSEFv,
Double => SSEDv,
_ => panic!("classify: unhandled vector element type")
};
let mut i = 0;
while i < len {
unify(cls, ix + (off + i * eltsz) / 8, reg);
// everything after the first one is the upper
// half of a register.
reg = SSEUp;
i += 1;
}
}
_ => panic!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
if cls[i].is_sse() {
i += 1;
while i < e {
if cls[i]!= SSEUp {
all_mem(cls);
return;
}
i += 1;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i!= e && cls[i] == SSEUp { i += 1; }
} else if cls[i] == X87 {
i += 1;
while i!= e && cls[i] == X87Up { i += 1; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = vec![NoClass; words];
if words > 4 {
all_mem(&mut cls);
return cls;
}
classify(ty, &mut cls, 0, 0);
fixup(ty, &mut cls);
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> usize {
let mut len = 1;
for c in cls {
if *c!= SSEUp {
break;
}
len += 1;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv | SSEDv | SSEInt(_) => {
let (elts_per_word, elt_ty) = match cls[i] {
SSEFv => (2, Type::f32(ccx)),
SSEDv => (1, Type::f64(ccx)),
SSEInt(bits) => {
assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
"llreg_ty: unsupported SSEInt width {}", bits);
(64 / bits, Type::ix(ccx, bits))
}
_ => unreachable!(),
};
let vec_len = llvec_len(&cls[i + 1..]);
let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => |
_ => panic!("llregtype: unhandled class")
}
i += 1;
}
if tys.len() == 1 && tys[0].kind() == Vector {
// if the type contains only a vector, pass it as that vector.
tys[0]
} else {
Type::struct_(ccx, &tys, false)
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type,
is_mem_cls: F,
ind_attr: Attribute)
-> ArgType where
F: FnOnce(&[RegClass]) -> bool,
{
if!ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(&cls) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, &cls)),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), Attribute::ByVal);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), Attribute::StructRet)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| {
tys.push(Type::f64(ccx));
} | conditional_block |
cabi_x86_64.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)]
use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector};
use trans::cabi::{ArgType, FnType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt(/* bitwidth */ u64),
/// Data that can appear in the upper half of an SSE register.
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.is_empty() { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.is_empty() { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return (off + a - 1) / a * a;
}
fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => (ty.int_width() as usize + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: usize,
newv: RegClass) {
if cls[i] == newv { return }
let to_write = match (cls[i], newv) {
(NoClass, _) => newv,
(_, NoClass) => return,
(Memory, _) |
(_, Memory) => Memory,
(Int, _) |
(_, Int) => Int,
(X87, _) |
(X87Up, _) |
(ComplexX87, _) |
(_, X87) |
(_, X87Up) |
(_, ComplexX87) => Memory,
(SSEFv, SSEUp) |
(SSEFs, SSEUp) |
(SSEDv, SSEUp) |
(SSEDs, SSEUp) |
(SSEInt(_), SSEUp) => return,
(_, _) => newv
};
cls[i] = to_write;
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: usize,
off: usize,
packed: bool) {
let mut field_off = off;
for ty in tys {
if!packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: usize,
off: usize) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign!= 0 {
let mut i = off / 8;
let e = (off + t_size + 7) / 8;
while i < e {
unify(cls, ix + i, Memory);
i += 1;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8, Int);
}
Float => {
if off % 8 == 4 {
unify(cls, ix + off / 8, SSEFv);
} else {
unify(cls, ix + off / 8, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8, SSEDs);
}
Struct => {
classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1;
}
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut reg = match elt.kind() {
Integer => SSEInt(elt.int_width()),
Float => SSEFv,
Double => SSEDv,
_ => panic!("classify: unhandled vector element type")
};
let mut i = 0;
while i < len {
unify(cls, ix + (off + i * eltsz) / 8, reg);
// everything after the first one is the upper
// half of a register.
reg = SSEUp;
i += 1;
}
}
_ => panic!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) {
if cls[i].is_sse() {
i += 1;
while i < e {
if cls[i]!= SSEUp {
all_mem(cls);
return;
}
i += 1;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i!= e && cls[i] == SSEUp { i += 1; }
} else if cls[i] == X87 {
i += 1;
while i!= e && cls[i] == X87Up { i += 1; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = vec![NoClass; words];
if words > 4 {
all_mem(&mut cls);
return cls;
}
classify(ty, &mut cls, 0, 0);
fixup(ty, &mut cls);
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> usize {
let mut len = 1;
for c in cls {
if *c!= SSEUp {
break;
}
len += 1;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv | SSEDv | SSEInt(_) => {
let (elts_per_word, elt_ty) = match cls[i] {
SSEFv => (2, Type::f32(ccx)),
SSEDv => (1, Type::f64(ccx)),
SSEInt(bits) => {
assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
"llreg_ty: unsupported SSEInt width {}", bits);
(64 / bits, Type::ix(ccx, bits))
}
_ => unreachable!(),
};
let vec_len = llvec_len(&cls[i + 1..]);
let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => panic!("llregtype: unhandled class")
}
i += 1;
}
if tys.len() == 1 && tys[0].kind() == Vector {
// if the type contains only a vector, pass it as that vector.
tys[0]
} else {
Type::struct_(ccx, &tys, false)
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType | ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), Attribute::ByVal);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), Attribute::StructRet)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| {
fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type,
is_mem_cls: F,
ind_attr: Attribute)
-> ArgType where
F: FnOnce(&[RegClass]) -> bool,
{
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(&cls) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, &cls)),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; | identifier_body |
project.rs |
};
use mentat_core::{
SQLValueType,
SQLValueTypeSet,
};
use mentat_core::util::{
Either,
};
use edn::query::{
Element,
Pull,
Variable,
};
use mentat_query_algebrizer::{
AlgebraicQuery,
ColumnName,
ConjoiningClauses,
QualifiedAlias,
VariableColumn,
};
use mentat_query_sql::{
ColumnOrExpression,
GroupBy,
Name,
Projection,
ProjectedColumn,
};
use query_projector_traits::aggregates::{
SimpleAggregation,
projected_column_for_simple_aggregate,
};
use query_projector_traits::errors::{
ProjectorError,
Result,
};
use projectors::{
Projector,
};
use pull::{
PullIndices,
PullOperation,
PullTemplate,
};
use super::{
CombinedProjection,
TypedIndex,
};
/// An internal temporary struct to pass between the projection 'walk' and the
/// resultant projector.
/// Projection accumulates four things:
/// - Two SQL projection lists. We need two because aggregate queries are nested
/// in order to apply DISTINCT to values prior to aggregation.
/// - A collection of templates for the projector to use to extract values.
/// - A list of columns to use for grouping. Grouping is a property of the projection!
pub(crate) struct ProjectedElements {
pub sql_projection: Projection,
pub pre_aggregate_projection: Option<Projection>,
pub templates: Vec<TypedIndex>,
// TODO: when we have an expression like
// [:find (pull?x [:foo/name :foo/age]) (pull?x [:foo/friend]) …]
// it would be more efficient to combine them.
pub pulls: Vec<PullTemplate>,
pub group_by: Vec<GroupBy>,
}
impl ProjectedElements {
pub(crate) fn combine(self, projector: Box<Projector>, distinct: bool) -> Result<CombinedProjection> {
Ok(CombinedProjection {
sql_projection: self.sql_projection,
pre_aggregate_projection: self.pre_aggregate_projection,
datalog_projector: projector,
distinct: distinct,
group_by_cols: self.group_by,
})
}
// We need the templates to make a projector that we can then hand to `combine`. This is the easy
// way to get it.
pub(crate) fn take_templates(&mut self) -> Vec<TypedIndex> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.templates);
out
}
pub(crate) fn take_pulls(&mut self) -> Vec<PullTemplate> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.pulls);
out
}
}
fn candidate_type_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
cc.extracted_types
.get(var)
.cloned()
.map(|alias| {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
(ColumnOrExpression::Column(alias), type_name)
})
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn cc_column(cc: &ConjoiningClauses, var: &Variable) -> Result<QualifiedAlias> {
cc.column_bindings
.get(var)
.and_then(|cols| cols.get(0).cloned())
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn candidate_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
// Every variable should be bound by the top-level CC to at least
// one column in the query. If that constraint is violated it's a
// bug in our code, so it's appropriate to panic here.
cc_column(cc, var)
.map(|qa| {
let name = VariableColumn::Variable(var.clone()).column_name();
(ColumnOrExpression::Column(qa), name)
})
}
/// Return the projected column -- that is, a value or SQL column and an associated name -- for a
/// given variable. Also return the type.
/// Callers are expected to determine whether to project a type tag as an additional SQL column.
pub fn projected_column_for_var(var: &Variable, cc: &ConjoiningClauses) -> Result<(ProjectedColumn, ValueTypeSet)> {
if let Some(value) = cc.bound_value(&var) {
// If we already know the value, then our lives are easy.
let tag = value.value_type();
let name = VariableColumn::Variable(var.clone()).column_name();
Ok((ProjectedColumn(ColumnOrExpression::Value(value.clone()), name), ValueTypeSet::of_one(tag)))
} else {
// If we don't, then the CC *must* have bound the variable.
let (column, name) = candidate_column(cc, var)?;
Ok((ProjectedColumn(column, name), cc.known_type_set(var)))
}
}
/// Walk an iterator of `Element`s, collecting projector templates and columns.
///
/// Returns a `ProjectedElements`, which combines SQL projections
/// and a `Vec` of `TypedIndex` 'keys' to use when looking up values.
///
/// Callers must ensure that every `Element` is distinct -- a query like
///
/// ```edn
/// [:find?x?x :where [?x _ _]]
/// ```
///
/// should fail to parse. See #358.
pub(crate) fn project_elements<'a, I: IntoIterator<Item = &'a Element>>(
count: usize,
elements: I,
query: &AlgebraicQuery) -> Result<ProjectedElements> {
// Give a little padding for type tags.
let mut inner_projection = Vec::with_capacity(count + 2);
// Everything in the outer query will _either_ be an aggregate operation
// _or_ a reference to a name projected from the inner.
// We'll expand them later.
let mut outer_projection: Vec<Either<Name, ProjectedColumn>> = Vec::with_capacity(count + 2);
let mut i: i32 = 0;
let mut min_max_count: usize = 0;
let mut templates = vec![];
let mut pulls: Vec<PullTemplate> = vec![];
let mut aggregates = false;
// Any variable that appears intact in the :find clause, not inside an aggregate expression.
// "Query variables not in aggregate expressions will group the results and appear intact
// in the result."
// We use an ordered set here so that we group in the correct order.
let mut outer_variables = IndexSet::new();
let mut corresponded_variables = IndexSet::new();
// Any variable that we are projecting from the inner query.
let mut inner_variables = BTreeSet::new();
for e in elements {
// Check for and reject duplicates.
match e {
&Element::Variable(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Duplicate variable {} in query.", var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
},
&Element::Corresponding(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("`(the {})` appears twice in query.", var)));
}
},
&Element::Aggregate(_) => {
| &Element::Pull(_) => {
},
};
// Record variables -- `(the?x)` and `?x` are different in this regard, because we don't want
// to group on variables that are corresponding-projected.
match e {
&Element::Variable(ref var) => {
outer_variables.insert(var.clone());
},
&Element::Corresponding(ref var) => {
// We will project these later; don't put them in `outer_variables`
// so we know not to group them.
corresponded_variables.insert(var.clone());
},
&Element::Pull(Pull { ref var, patterns: _ }) => {
// We treat `pull` as an ordinary variable extraction,
// and we expand it later.
outer_variables.insert(var.clone());
},
&Element::Aggregate(_) => {
},
};
// Now do the main processing of each element.
match e {
// Each time we come across a variable, we push a SQL column
// into the SQL projection, aliased to the name of the variable,
// and we push an annotated index into the projector.
&Element::Variable(ref var) |
&Element::Corresponding(ref var) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
templates.push(TypedIndex::Known(i, tag));
i += 1; // We used one SQL column.
} else {
templates.push(TypedIndex::Unknown(i, i + 1));
i += 2; // We used two SQL columns.
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
outer_projection.push(Either::Left(type_name));
}
},
&Element::Pull(Pull { ref var, ref patterns }) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
// We will have at least as many SQL columns as Datalog output columns.
// `i` tracks the former. The length of `templates` is the current latter.
// Projecting pull requires grabbing values, which we can do from the raw
// rows, and then populating the output, so we keep both column indices.
let output_index = templates.len();
assert!(output_index <= i as usize);
templates.push(TypedIndex::Known(i, tag));
pulls.push(PullTemplate {
indices: PullIndices {
sql_index: i,
output_index,
},
op: PullOperation((*patterns).clone()),
});
i += 1; // We used one SQL column.
} else {
// This should be impossible: (pull?x) implies that?x is a ref.
unreachable!();
}
},
&Element::Aggregate(ref a) => {
if let Some(simple) = a.to_simple() {
aggregates = true;
use query_projector_traits::aggregates::SimpleAggregationOp::*;
match simple.op {
Max | Min => {
min_max_count += 1;
},
Avg | Count | Sum => (),
}
// When we encounter a simple aggregate -- one in which the aggregation can be
// implemented in SQL, on a single variable -- we just push the SQL aggregation op.
// We must ensure the following:
// - There's a column for the var.
// - The type of the var is known to be restricted to a sensible input set
// (not necessarily a single type, but e.g., all vals must be Double or Long).
// - The type set must be appropriate for the operation. E.g., `Sum` is not a
// meaningful operation on instants.
let (projected_column, return_type) = projected_column_for_simple_aggregate(&simple, &query.cc)?;
outer_projection.push(Either::Right(projected_column));
if!inner_variables.contains(&simple.var) {
inner_variables.insert(simple.var.clone());
let (projected_column, _type_set) = projected_column_for_var(&simple.var, &query.cc)?;
inner_projection.push(projected_column);
if query.cc.known_type_set(&simple.var).unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &simple.var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
// We might regret using the type tag here instead of the `ValueType`.
templates.push(TypedIndex::Known(i, return_type.value_type_tag()));
i += 1;
} else {
// TODO: complex aggregates.
bail!(ProjectorError::NotYetImplemented("complex aggregates".into()));
}
},
}
}
match (min_max_count, corresponded_variables.len()) {
(0, 0) | (_, 0) => {},
(0, _) => {
bail!(ProjectorError::InvalidProjection("Warning: used `the` without `min` or `max`.".to_string()));
},
(1, _) => {
// This is the success case!
},
(n, c) => {
bail!(ProjectorError::AmbiguousAggregates(n, c));
},
}
// Anything used in ORDER BY (which we're given in `named_projection`)
// needs to be in the SQL column list so we can refer to it by name.
//
// They don't affect projection.
//
// If a variable is of a non-fixed type, also project the type tag column, so we don't
// accidentally unify across types when considering uniqueness!
for var in query.named_projection.iter() {
if outer_variables.contains(var) {
continue;
}
// If it's a fixed value, we need do nothing further.
if query.cc.is_value_bound(&var) {
continue;
}
let already_inner = inner_variables.contains(&var);
let (column, name) = candidate_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(column, name.clone()));
inner_variables.insert(var.clone());
}
outer_projection.push(Either::Left(name));
outer_variables.insert(var.clone());
// We don't care if a column has a single _type_, we care if it has a single type _tag_,
// because that's what we'll use if we're projecting. E.g., Long and Double.
// Single type implies single type tag, and is cheaper, so we check that first.
let types = query.cc.known_type_set(&var);
if!types.has_unique_type_tag() {
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
outer_projection.push(Either::Left(type_name));
}
}
if!aggregates {
// We're done -- we never need to group unless we're aggregating.
return Ok(ProjectedElements {
sql_projection: Projection::Columns(inner_projection),
pre_aggregate_projection: None,
templates,
pulls,
group_by: vec![],
});
}
// OK, on to aggregates.
// We need to produce two SQL projection lists: one for an inner query and one for the outer.
//
// The inner serves these purposes:
// - Projecting variables to avoid duplicates being elided. (:with)
// - Making bindings available to the outermost query for projection, ordering, and grouping.
//
// The outer is consumed by the projector.
//
// We will also be producing:
// - A GROUP BY list to group the output of the inner query by non-aggregate variables
// so that it can be correctly aggregated.
// Turn this collection of vars into a collection of columns from the query.
// We don't allow grouping on anything but a variable bound in the query.
// We group by tag if necessary.
let mut group_by = Vec::with_capacity(outer_variables.len() + 2);
let vars = outer_variables.into_iter().zip(::std::iter::repeat(true));
let corresponds = corresponded_variables.into_iter().zip(::std::iter::repeat(false));
for (var, group) in vars.chain(corresponds) {
if query.cc.is_value_bound(&var) {
continue;
}
if group {
// The GROUP BY goes outside, but it needs every variable and type tag to be
// projected from inside. Collect in both directions here.
let name = VariableColumn::Variable(var.clone()).column_name();
group_by.push(GroupBy::ProjectedColumn(name));
}
let needs_type_projection =!query.cc.known_type_set(&var).has_unique_type_tag();
let already_inner = inner_variables.contains(&var);
if!already_inner {
let (column, name) = candidate_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(column, name.clone()));
}
if needs_type_projection {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
if!already_inner {
let type_col = query.cc
.extracted_types
.get(&var)
.cloned()
.ok_or_else(|| ProjectorError::NoTypeAvailableForVariable(var.name().clone()))?;
inner_projection.push(ProjectedColumn(ColumnOrExpression::Column(type_col), type_name.clone()));
}
if group {
group_by.push(GroupBy::ProjectedColumn(type_name));
}
};
}
for var in query.with.iter() {
// We never need to project a constant.
if query.cc.is_value_bound(&var) {
continue;
}
// We don't need to add inner projections for :with if they are already there.
if!inner_variables.contains(&var) {
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
inner_projection.push(projected_column);
if type_set.unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
| },
| conditional_block |
project.rs | ,
};
use mentat_core::{
SQLValueType,
SQLValueTypeSet,
};
use mentat_core::util::{
Either,
};
use edn::query::{
Element,
Pull,
Variable,
};
use mentat_query_algebrizer::{
AlgebraicQuery,
ColumnName, |
use mentat_query_sql::{
ColumnOrExpression,
GroupBy,
Name,
Projection,
ProjectedColumn,
};
use query_projector_traits::aggregates::{
SimpleAggregation,
projected_column_for_simple_aggregate,
};
use query_projector_traits::errors::{
ProjectorError,
Result,
};
use projectors::{
Projector,
};
use pull::{
PullIndices,
PullOperation,
PullTemplate,
};
use super::{
CombinedProjection,
TypedIndex,
};
/// An internal temporary struct to pass between the projection 'walk' and the
/// resultant projector.
/// Projection accumulates four things:
/// - Two SQL projection lists. We need two because aggregate queries are nested
/// in order to apply DISTINCT to values prior to aggregation.
/// - A collection of templates for the projector to use to extract values.
/// - A list of columns to use for grouping. Grouping is a property of the projection!
pub(crate) struct ProjectedElements {
pub sql_projection: Projection,
pub pre_aggregate_projection: Option<Projection>,
pub templates: Vec<TypedIndex>,
// TODO: when we have an expression like
// [:find (pull?x [:foo/name :foo/age]) (pull?x [:foo/friend]) …]
// it would be more efficient to combine them.
pub pulls: Vec<PullTemplate>,
pub group_by: Vec<GroupBy>,
}
impl ProjectedElements {
pub(crate) fn combine(self, projector: Box<Projector>, distinct: bool) -> Result<CombinedProjection> {
Ok(CombinedProjection {
sql_projection: self.sql_projection,
pre_aggregate_projection: self.pre_aggregate_projection,
datalog_projector: projector,
distinct: distinct,
group_by_cols: self.group_by,
})
}
// We need the templates to make a projector that we can then hand to `combine`. This is the easy
// way to get it.
pub(crate) fn take_templates(&mut self) -> Vec<TypedIndex> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.templates);
out
}
pub(crate) fn take_pulls(&mut self) -> Vec<PullTemplate> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.pulls);
out
}
}
fn candidate_type_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
cc.extracted_types
.get(var)
.cloned()
.map(|alias| {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
(ColumnOrExpression::Column(alias), type_name)
})
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn cc_column(cc: &ConjoiningClauses, var: &Variable) -> Result<QualifiedAlias> {
cc.column_bindings
.get(var)
.and_then(|cols| cols.get(0).cloned())
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn candidate_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
// Every variable should be bound by the top-level CC to at least
// one column in the query. If that constraint is violated it's a
// bug in our code, so it's appropriate to panic here.
cc_column(cc, var)
.map(|qa| {
let name = VariableColumn::Variable(var.clone()).column_name();
(ColumnOrExpression::Column(qa), name)
})
}
/// Return the projected column -- that is, a value or SQL column and an associated name -- for a
/// given variable. Also return the type.
/// Callers are expected to determine whether to project a type tag as an additional SQL column.
pub fn projected_column_for_var(var: &Variable, cc: &ConjoiningClauses) -> Result<(ProjectedColumn, ValueTypeSet)> {
if let Some(value) = cc.bound_value(&var) {
// If we already know the value, then our lives are easy.
let tag = value.value_type();
let name = VariableColumn::Variable(var.clone()).column_name();
Ok((ProjectedColumn(ColumnOrExpression::Value(value.clone()), name), ValueTypeSet::of_one(tag)))
} else {
// If we don't, then the CC *must* have bound the variable.
let (column, name) = candidate_column(cc, var)?;
Ok((ProjectedColumn(column, name), cc.known_type_set(var)))
}
}
/// Walk an iterator of `Element`s, collecting projector templates and columns.
///
/// Returns a `ProjectedElements`, which combines SQL projections
/// and a `Vec` of `TypedIndex` 'keys' to use when looking up values.
///
/// Callers must ensure that every `Element` is distinct -- a query like
///
/// ```edn
/// [:find?x?x :where [?x _ _]]
/// ```
///
/// should fail to parse. See #358.
pub(crate) fn project_elements<'a, I: IntoIterator<Item = &'a Element>>(
count: usize,
elements: I,
query: &AlgebraicQuery) -> Result<ProjectedElements> {
// Give a little padding for type tags.
let mut inner_projection = Vec::with_capacity(count + 2);
// Everything in the outer query will _either_ be an aggregate operation
// _or_ a reference to a name projected from the inner.
// We'll expand them later.
let mut outer_projection: Vec<Either<Name, ProjectedColumn>> = Vec::with_capacity(count + 2);
let mut i: i32 = 0;
let mut min_max_count: usize = 0;
let mut templates = vec![];
let mut pulls: Vec<PullTemplate> = vec![];
let mut aggregates = false;
// Any variable that appears intact in the :find clause, not inside an aggregate expression.
// "Query variables not in aggregate expressions will group the results and appear intact
// in the result."
// We use an ordered set here so that we group in the correct order.
let mut outer_variables = IndexSet::new();
let mut corresponded_variables = IndexSet::new();
// Any variable that we are projecting from the inner query.
let mut inner_variables = BTreeSet::new();
for e in elements {
// Check for and reject duplicates.
match e {
&Element::Variable(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Duplicate variable {} in query.", var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
},
&Element::Corresponding(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("`(the {})` appears twice in query.", var)));
}
},
&Element::Aggregate(_) => {
},
&Element::Pull(_) => {
},
};
// Record variables -- `(the?x)` and `?x` are different in this regard, because we don't want
// to group on variables that are corresponding-projected.
match e {
&Element::Variable(ref var) => {
outer_variables.insert(var.clone());
},
&Element::Corresponding(ref var) => {
// We will project these later; don't put them in `outer_variables`
// so we know not to group them.
corresponded_variables.insert(var.clone());
},
&Element::Pull(Pull { ref var, patterns: _ }) => {
// We treat `pull` as an ordinary variable extraction,
// and we expand it later.
outer_variables.insert(var.clone());
},
&Element::Aggregate(_) => {
},
};
// Now do the main processing of each element.
match e {
// Each time we come across a variable, we push a SQL column
// into the SQL projection, aliased to the name of the variable,
// and we push an annotated index into the projector.
&Element::Variable(ref var) |
&Element::Corresponding(ref var) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
templates.push(TypedIndex::Known(i, tag));
i += 1; // We used one SQL column.
} else {
templates.push(TypedIndex::Unknown(i, i + 1));
i += 2; // We used two SQL columns.
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
outer_projection.push(Either::Left(type_name));
}
},
&Element::Pull(Pull { ref var, ref patterns }) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
// We will have at least as many SQL columns as Datalog output columns.
// `i` tracks the former. The length of `templates` is the current latter.
// Projecting pull requires grabbing values, which we can do from the raw
// rows, and then populating the output, so we keep both column indices.
let output_index = templates.len();
assert!(output_index <= i as usize);
templates.push(TypedIndex::Known(i, tag));
pulls.push(PullTemplate {
indices: PullIndices {
sql_index: i,
output_index,
},
op: PullOperation((*patterns).clone()),
});
i += 1; // We used one SQL column.
} else {
// This should be impossible: (pull?x) implies that?x is a ref.
unreachable!();
}
},
&Element::Aggregate(ref a) => {
if let Some(simple) = a.to_simple() {
aggregates = true;
use query_projector_traits::aggregates::SimpleAggregationOp::*;
match simple.op {
Max | Min => {
min_max_count += 1;
},
Avg | Count | Sum => (),
}
// When we encounter a simple aggregate -- one in which the aggregation can be
// implemented in SQL, on a single variable -- we just push the SQL aggregation op.
// We must ensure the following:
// - There's a column for the var.
// - The type of the var is known to be restricted to a sensible input set
// (not necessarily a single type, but e.g., all vals must be Double or Long).
// - The type set must be appropriate for the operation. E.g., `Sum` is not a
// meaningful operation on instants.
let (projected_column, return_type) = projected_column_for_simple_aggregate(&simple, &query.cc)?;
outer_projection.push(Either::Right(projected_column));
if!inner_variables.contains(&simple.var) {
inner_variables.insert(simple.var.clone());
let (projected_column, _type_set) = projected_column_for_var(&simple.var, &query.cc)?;
inner_projection.push(projected_column);
if query.cc.known_type_set(&simple.var).unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &simple.var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
// We might regret using the type tag here instead of the `ValueType`.
templates.push(TypedIndex::Known(i, return_type.value_type_tag()));
i += 1;
} else {
// TODO: complex aggregates.
bail!(ProjectorError::NotYetImplemented("complex aggregates".into()));
}
},
}
}
match (min_max_count, corresponded_variables.len()) {
(0, 0) | (_, 0) => {},
(0, _) => {
bail!(ProjectorError::InvalidProjection("Warning: used `the` without `min` or `max`.".to_string()));
},
(1, _) => {
// This is the success case!
},
(n, c) => {
bail!(ProjectorError::AmbiguousAggregates(n, c));
},
}
// Anything used in ORDER BY (which we're given in `named_projection`)
// needs to be in the SQL column list so we can refer to it by name.
//
// They don't affect projection.
//
// If a variable is of a non-fixed type, also project the type tag column, so we don't
// accidentally unify across types when considering uniqueness!
for var in query.named_projection.iter() {
if outer_variables.contains(var) {
continue;
}
// If it's a fixed value, we need do nothing further.
if query.cc.is_value_bound(&var) {
continue;
}
let already_inner = inner_variables.contains(&var);
let (column, name) = candidate_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(column, name.clone()));
inner_variables.insert(var.clone());
}
outer_projection.push(Either::Left(name));
outer_variables.insert(var.clone());
// We don't care if a column has a single _type_, we care if it has a single type _tag_,
// because that's what we'll use if we're projecting. E.g., Long and Double.
// Single type implies single type tag, and is cheaper, so we check that first.
let types = query.cc.known_type_set(&var);
if!types.has_unique_type_tag() {
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
outer_projection.push(Either::Left(type_name));
}
}
if!aggregates {
// We're done -- we never need to group unless we're aggregating.
return Ok(ProjectedElements {
sql_projection: Projection::Columns(inner_projection),
pre_aggregate_projection: None,
templates,
pulls,
group_by: vec![],
});
}
// OK, on to aggregates.
// We need to produce two SQL projection lists: one for an inner query and one for the outer.
//
// The inner serves these purposes:
// - Projecting variables to avoid duplicates being elided. (:with)
// - Making bindings available to the outermost query for projection, ordering, and grouping.
//
// The outer is consumed by the projector.
//
// We will also be producing:
// - A GROUP BY list to group the output of the inner query by non-aggregate variables
// so that it can be correctly aggregated.
// Turn this collection of vars into a collection of columns from the query.
// We don't allow grouping on anything but a variable bound in the query.
// We group by tag if necessary.
let mut group_by = Vec::with_capacity(outer_variables.len() + 2);
let vars = outer_variables.into_iter().zip(::std::iter::repeat(true));
let corresponds = corresponded_variables.into_iter().zip(::std::iter::repeat(false));
for (var, group) in vars.chain(corresponds) {
if query.cc.is_value_bound(&var) {
continue;
}
if group {
// The GROUP BY goes outside, but it needs every variable and type tag to be
// projected from inside. Collect in both directions here.
let name = VariableColumn::Variable(var.clone()).column_name();
group_by.push(GroupBy::ProjectedColumn(name));
}
let needs_type_projection =!query.cc.known_type_set(&var).has_unique_type_tag();
let already_inner = inner_variables.contains(&var);
if!already_inner {
let (column, name) = candidate_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(column, name.clone()));
}
if needs_type_projection {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
if!already_inner {
let type_col = query.cc
.extracted_types
.get(&var)
.cloned()
.ok_or_else(|| ProjectorError::NoTypeAvailableForVariable(var.name().clone()))?;
inner_projection.push(ProjectedColumn(ColumnOrExpression::Column(type_col), type_name.clone()));
}
if group {
group_by.push(GroupBy::ProjectedColumn(type_name));
}
};
}
for var in query.with.iter() {
// We never need to project a constant.
if query.cc.is_value_bound(&var) {
continue;
}
// We don't need to add inner projections for :with if they are already there.
if!inner_variables.contains(&var) {
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
inner_projection.push(projected_column);
if type_set.unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
} | ConjoiningClauses,
QualifiedAlias,
VariableColumn,
};
| random_line_split |
project.rs | };
use mentat_core::{
SQLValueType,
SQLValueTypeSet,
};
use mentat_core::util::{
Either,
};
use edn::query::{
Element,
Pull,
Variable,
};
use mentat_query_algebrizer::{
AlgebraicQuery,
ColumnName,
ConjoiningClauses,
QualifiedAlias,
VariableColumn,
};
use mentat_query_sql::{
ColumnOrExpression,
GroupBy,
Name,
Projection,
ProjectedColumn,
};
use query_projector_traits::aggregates::{
SimpleAggregation,
projected_column_for_simple_aggregate,
};
use query_projector_traits::errors::{
ProjectorError,
Result,
};
use projectors::{
Projector,
};
use pull::{
PullIndices,
PullOperation,
PullTemplate,
};
use super::{
CombinedProjection,
TypedIndex,
};
/// An internal temporary struct to pass between the projection 'walk' and the
/// resultant projector.
/// Projection accumulates four things:
/// - Two SQL projection lists. We need two because aggregate queries are nested
/// in order to apply DISTINCT to values prior to aggregation.
/// - A collection of templates for the projector to use to extract values.
/// - A list of columns to use for grouping. Grouping is a property of the projection!
pub(crate) struct ProjectedElements {
pub sql_projection: Projection,
pub pre_aggregate_projection: Option<Projection>,
pub templates: Vec<TypedIndex>,
// TODO: when we have an expression like
// [:find (pull?x [:foo/name :foo/age]) (pull?x [:foo/friend]) …]
// it would be more efficient to combine them.
pub pulls: Vec<PullTemplate>,
pub group_by: Vec<GroupBy>,
}
impl ProjectedElements {
pub(crate) fn combine(self, projector: Box<Projector>, distinct: bool) -> Result<CombinedProjection> {
Ok(CombinedProjection {
sql_projection: self.sql_projection,
pre_aggregate_projection: self.pre_aggregate_projection,
datalog_projector: projector,
distinct: distinct,
group_by_cols: self.group_by,
})
}
// We need the templates to make a projector that we can then hand to `combine`. This is the easy
// way to get it.
pub(crate) fn take_templates(&mut self) -> Vec<TypedIndex> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.templates);
out
}
pub(crate) fn take_pulls(&mut self) -> Vec<PullTemplate> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.pulls);
out
}
}
fn ca | c: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
cc.extracted_types
.get(var)
.cloned()
.map(|alias| {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
(ColumnOrExpression::Column(alias), type_name)
})
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn cc_column(cc: &ConjoiningClauses, var: &Variable) -> Result<QualifiedAlias> {
cc.column_bindings
.get(var)
.and_then(|cols| cols.get(0).cloned())
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn candidate_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
// Every variable should be bound by the top-level CC to at least
// one column in the query. If that constraint is violated it's a
// bug in our code, so it's appropriate to panic here.
cc_column(cc, var)
.map(|qa| {
let name = VariableColumn::Variable(var.clone()).column_name();
(ColumnOrExpression::Column(qa), name)
})
}
/// Return the projected column -- that is, a value or SQL column and an associated name -- for a
/// given variable. Also return the type.
/// Callers are expected to determine whether to project a type tag as an additional SQL column.
pub fn projected_column_for_var(var: &Variable, cc: &ConjoiningClauses) -> Result<(ProjectedColumn, ValueTypeSet)> {
if let Some(value) = cc.bound_value(&var) {
// If we already know the value, then our lives are easy.
let tag = value.value_type();
let name = VariableColumn::Variable(var.clone()).column_name();
Ok((ProjectedColumn(ColumnOrExpression::Value(value.clone()), name), ValueTypeSet::of_one(tag)))
} else {
// If we don't, then the CC *must* have bound the variable.
let (column, name) = candidate_column(cc, var)?;
Ok((ProjectedColumn(column, name), cc.known_type_set(var)))
}
}
/// Walk an iterator of `Element`s, collecting projector templates and columns.
///
/// Returns a `ProjectedElements`, which combines SQL projections
/// and a `Vec` of `TypedIndex` 'keys' to use when looking up values.
///
/// Callers must ensure that every `Element` is distinct -- a query like
///
/// ```edn
/// [:find?x?x :where [?x _ _]]
/// ```
///
/// should fail to parse. See #358.
pub(crate) fn project_elements<'a, I: IntoIterator<Item = &'a Element>>(
count: usize,
elements: I,
query: &AlgebraicQuery) -> Result<ProjectedElements> {
// Give a little padding for type tags.
let mut inner_projection = Vec::with_capacity(count + 2);
// Everything in the outer query will _either_ be an aggregate operation
// _or_ a reference to a name projected from the inner.
// We'll expand them later.
let mut outer_projection: Vec<Either<Name, ProjectedColumn>> = Vec::with_capacity(count + 2);
let mut i: i32 = 0;
let mut min_max_count: usize = 0;
let mut templates = vec![];
let mut pulls: Vec<PullTemplate> = vec![];
let mut aggregates = false;
// Any variable that appears intact in the :find clause, not inside an aggregate expression.
// "Query variables not in aggregate expressions will group the results and appear intact
// in the result."
// We use an ordered set here so that we group in the correct order.
let mut outer_variables = IndexSet::new();
let mut corresponded_variables = IndexSet::new();
// Any variable that we are projecting from the inner query.
let mut inner_variables = BTreeSet::new();
for e in elements {
// Check for and reject duplicates.
match e {
&Element::Variable(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Duplicate variable {} in query.", var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
},
&Element::Corresponding(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("`(the {})` appears twice in query.", var)));
}
},
&Element::Aggregate(_) => {
},
&Element::Pull(_) => {
},
};
// Record variables -- `(the?x)` and `?x` are different in this regard, because we don't want
// to group on variables that are corresponding-projected.
match e {
&Element::Variable(ref var) => {
outer_variables.insert(var.clone());
},
&Element::Corresponding(ref var) => {
// We will project these later; don't put them in `outer_variables`
// so we know not to group them.
corresponded_variables.insert(var.clone());
},
&Element::Pull(Pull { ref var, patterns: _ }) => {
// We treat `pull` as an ordinary variable extraction,
// and we expand it later.
outer_variables.insert(var.clone());
},
&Element::Aggregate(_) => {
},
};
// Now do the main processing of each element.
match e {
// Each time we come across a variable, we push a SQL column
// into the SQL projection, aliased to the name of the variable,
// and we push an annotated index into the projector.
&Element::Variable(ref var) |
&Element::Corresponding(ref var) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
templates.push(TypedIndex::Known(i, tag));
i += 1; // We used one SQL column.
} else {
templates.push(TypedIndex::Unknown(i, i + 1));
i += 2; // We used two SQL columns.
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
outer_projection.push(Either::Left(type_name));
}
},
&Element::Pull(Pull { ref var, ref patterns }) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
// We will have at least as many SQL columns as Datalog output columns.
// `i` tracks the former. The length of `templates` is the current latter.
// Projecting pull requires grabbing values, which we can do from the raw
// rows, and then populating the output, so we keep both column indices.
let output_index = templates.len();
assert!(output_index <= i as usize);
templates.push(TypedIndex::Known(i, tag));
pulls.push(PullTemplate {
indices: PullIndices {
sql_index: i,
output_index,
},
op: PullOperation((*patterns).clone()),
});
i += 1; // We used one SQL column.
} else {
// This should be impossible: (pull?x) implies that?x is a ref.
unreachable!();
}
},
&Element::Aggregate(ref a) => {
if let Some(simple) = a.to_simple() {
aggregates = true;
use query_projector_traits::aggregates::SimpleAggregationOp::*;
match simple.op {
Max | Min => {
min_max_count += 1;
},
Avg | Count | Sum => (),
}
// When we encounter a simple aggregate -- one in which the aggregation can be
// implemented in SQL, on a single variable -- we just push the SQL aggregation op.
// We must ensure the following:
// - There's a column for the var.
// - The type of the var is known to be restricted to a sensible input set
// (not necessarily a single type, but e.g., all vals must be Double or Long).
// - The type set must be appropriate for the operation. E.g., `Sum` is not a
// meaningful operation on instants.
let (projected_column, return_type) = projected_column_for_simple_aggregate(&simple, &query.cc)?;
outer_projection.push(Either::Right(projected_column));
if!inner_variables.contains(&simple.var) {
inner_variables.insert(simple.var.clone());
let (projected_column, _type_set) = projected_column_for_var(&simple.var, &query.cc)?;
inner_projection.push(projected_column);
if query.cc.known_type_set(&simple.var).unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &simple.var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
// We might regret using the type tag here instead of the `ValueType`.
templates.push(TypedIndex::Known(i, return_type.value_type_tag()));
i += 1;
} else {
// TODO: complex aggregates.
bail!(ProjectorError::NotYetImplemented("complex aggregates".into()));
}
},
}
}
match (min_max_count, corresponded_variables.len()) {
(0, 0) | (_, 0) => {},
(0, _) => {
bail!(ProjectorError::InvalidProjection("Warning: used `the` without `min` or `max`.".to_string()));
},
(1, _) => {
// This is the success case!
},
(n, c) => {
bail!(ProjectorError::AmbiguousAggregates(n, c));
},
}
// Anything used in ORDER BY (which we're given in `named_projection`)
// needs to be in the SQL column list so we can refer to it by name.
//
// They don't affect projection.
//
// If a variable is of a non-fixed type, also project the type tag column, so we don't
// accidentally unify across types when considering uniqueness!
for var in query.named_projection.iter() {
if outer_variables.contains(var) {
continue;
}
// If it's a fixed value, we need do nothing further.
if query.cc.is_value_bound(&var) {
continue;
}
let already_inner = inner_variables.contains(&var);
let (column, name) = candidate_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(column, name.clone()));
inner_variables.insert(var.clone());
}
outer_projection.push(Either::Left(name));
outer_variables.insert(var.clone());
// We don't care if a column has a single _type_, we care if it has a single type _tag_,
// because that's what we'll use if we're projecting. E.g., Long and Double.
// Single type implies single type tag, and is cheaper, so we check that first.
let types = query.cc.known_type_set(&var);
if!types.has_unique_type_tag() {
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
outer_projection.push(Either::Left(type_name));
}
}
if!aggregates {
// We're done -- we never need to group unless we're aggregating.
return Ok(ProjectedElements {
sql_projection: Projection::Columns(inner_projection),
pre_aggregate_projection: None,
templates,
pulls,
group_by: vec![],
});
}
// OK, on to aggregates.
// We need to produce two SQL projection lists: one for an inner query and one for the outer.
//
// The inner serves these purposes:
// - Projecting variables to avoid duplicates being elided. (:with)
// - Making bindings available to the outermost query for projection, ordering, and grouping.
//
// The outer is consumed by the projector.
//
// We will also be producing:
// - A GROUP BY list to group the output of the inner query by non-aggregate variables
// so that it can be correctly aggregated.
// Turn this collection of vars into a collection of columns from the query.
// We don't allow grouping on anything but a variable bound in the query.
// We group by tag if necessary.
let mut group_by = Vec::with_capacity(outer_variables.len() + 2);
let vars = outer_variables.into_iter().zip(::std::iter::repeat(true));
let corresponds = corresponded_variables.into_iter().zip(::std::iter::repeat(false));
for (var, group) in vars.chain(corresponds) {
if query.cc.is_value_bound(&var) {
continue;
}
if group {
// The GROUP BY goes outside, but it needs every variable and type tag to be
// projected from inside. Collect in both directions here.
let name = VariableColumn::Variable(var.clone()).column_name();
group_by.push(GroupBy::ProjectedColumn(name));
}
let needs_type_projection =!query.cc.known_type_set(&var).has_unique_type_tag();
let already_inner = inner_variables.contains(&var);
if!already_inner {
let (column, name) = candidate_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(column, name.clone()));
}
if needs_type_projection {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
if!already_inner {
let type_col = query.cc
.extracted_types
.get(&var)
.cloned()
.ok_or_else(|| ProjectorError::NoTypeAvailableForVariable(var.name().clone()))?;
inner_projection.push(ProjectedColumn(ColumnOrExpression::Column(type_col), type_name.clone()));
}
if group {
group_by.push(GroupBy::ProjectedColumn(type_name));
}
};
}
for var in query.with.iter() {
// We never need to project a constant.
if query.cc.is_value_bound(&var) {
continue;
}
// We don't need to add inner projections for :with if they are already there.
if!inner_variables.contains(&var) {
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
inner_projection.push(projected_column);
if type_set.unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
| ndidate_type_column(c | identifier_name |
project.rs |
};
use mentat_core::{
SQLValueType,
SQLValueTypeSet,
};
use mentat_core::util::{
Either,
};
use edn::query::{
Element,
Pull,
Variable,
};
use mentat_query_algebrizer::{
AlgebraicQuery,
ColumnName,
ConjoiningClauses,
QualifiedAlias,
VariableColumn,
};
use mentat_query_sql::{
ColumnOrExpression,
GroupBy,
Name,
Projection,
ProjectedColumn,
};
use query_projector_traits::aggregates::{
SimpleAggregation,
projected_column_for_simple_aggregate,
};
use query_projector_traits::errors::{
ProjectorError,
Result,
};
use projectors::{
Projector,
};
use pull::{
PullIndices,
PullOperation,
PullTemplate,
};
use super::{
CombinedProjection,
TypedIndex,
};
/// An internal temporary struct to pass between the projection 'walk' and the
/// resultant projector.
/// Projection accumulates four things:
/// - Two SQL projection lists. We need two because aggregate queries are nested
/// in order to apply DISTINCT to values prior to aggregation.
/// - A collection of templates for the projector to use to extract values.
/// - A list of columns to use for grouping. Grouping is a property of the projection!
pub(crate) struct ProjectedElements {
pub sql_projection: Projection,
pub pre_aggregate_projection: Option<Projection>,
pub templates: Vec<TypedIndex>,
// TODO: when we have an expression like
// [:find (pull?x [:foo/name :foo/age]) (pull?x [:foo/friend]) …]
// it would be more efficient to combine them.
pub pulls: Vec<PullTemplate>,
pub group_by: Vec<GroupBy>,
}
impl ProjectedElements {
pub(crate) fn combine(self, projector: Box<Projector>, distinct: bool) -> Result<CombinedProjection> {
Ok(CombinedProjection {
sql_projection: self.sql_projection,
pre_aggregate_projection: self.pre_aggregate_projection,
datalog_projector: projector,
distinct: distinct,
group_by_cols: self.group_by,
})
}
// We need the templates to make a projector that we can then hand to `combine`. This is the easy
// way to get it.
pub(crate) fn take_templates(&mut self) -> Vec<TypedIndex> {
| pub(crate) fn take_pulls(&mut self) -> Vec<PullTemplate> {
let mut out = vec![];
::std::mem::swap(&mut out, &mut self.pulls);
out
}
}
fn candidate_type_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
cc.extracted_types
.get(var)
.cloned()
.map(|alias| {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
(ColumnOrExpression::Column(alias), type_name)
})
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn cc_column(cc: &ConjoiningClauses, var: &Variable) -> Result<QualifiedAlias> {
cc.column_bindings
.get(var)
.and_then(|cols| cols.get(0).cloned())
.ok_or_else(|| ProjectorError::UnboundVariable(var.name()).into())
}
fn candidate_column(cc: &ConjoiningClauses, var: &Variable) -> Result<(ColumnOrExpression, Name)> {
// Every variable should be bound by the top-level CC to at least
// one column in the query. If that constraint is violated it's a
// bug in our code, so it's appropriate to panic here.
cc_column(cc, var)
.map(|qa| {
let name = VariableColumn::Variable(var.clone()).column_name();
(ColumnOrExpression::Column(qa), name)
})
}
/// Return the projected column -- that is, a value or SQL column and an associated name -- for a
/// given variable. Also return the type.
/// Callers are expected to determine whether to project a type tag as an additional SQL column.
pub fn projected_column_for_var(var: &Variable, cc: &ConjoiningClauses) -> Result<(ProjectedColumn, ValueTypeSet)> {
if let Some(value) = cc.bound_value(&var) {
// If we already know the value, then our lives are easy.
let tag = value.value_type();
let name = VariableColumn::Variable(var.clone()).column_name();
Ok((ProjectedColumn(ColumnOrExpression::Value(value.clone()), name), ValueTypeSet::of_one(tag)))
} else {
// If we don't, then the CC *must* have bound the variable.
let (column, name) = candidate_column(cc, var)?;
Ok((ProjectedColumn(column, name), cc.known_type_set(var)))
}
}
/// Walk an iterator of `Element`s, collecting projector templates and columns.
///
/// Returns a `ProjectedElements`, which combines SQL projections
/// and a `Vec` of `TypedIndex` 'keys' to use when looking up values.
///
/// Callers must ensure that every `Element` is distinct -- a query like
///
/// ```edn
/// [:find?x?x :where [?x _ _]]
/// ```
///
/// should fail to parse. See #358.
pub(crate) fn project_elements<'a, I: IntoIterator<Item = &'a Element>>(
count: usize,
elements: I,
query: &AlgebraicQuery) -> Result<ProjectedElements> {
// Give a little padding for type tags.
let mut inner_projection = Vec::with_capacity(count + 2);
// Everything in the outer query will _either_ be an aggregate operation
// _or_ a reference to a name projected from the inner.
// We'll expand them later.
let mut outer_projection: Vec<Either<Name, ProjectedColumn>> = Vec::with_capacity(count + 2);
let mut i: i32 = 0;
let mut min_max_count: usize = 0;
let mut templates = vec![];
let mut pulls: Vec<PullTemplate> = vec![];
let mut aggregates = false;
// Any variable that appears intact in the :find clause, not inside an aggregate expression.
// "Query variables not in aggregate expressions will group the results and appear intact
// in the result."
// We use an ordered set here so that we group in the correct order.
let mut outer_variables = IndexSet::new();
let mut corresponded_variables = IndexSet::new();
// Any variable that we are projecting from the inner query.
let mut inner_variables = BTreeSet::new();
for e in elements {
// Check for and reject duplicates.
match e {
&Element::Variable(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Duplicate variable {} in query.", var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
},
&Element::Corresponding(ref var) => {
if outer_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("Can't project both {} and `(the {})` from a query.", var, var)));
}
if corresponded_variables.contains(var) {
bail!(ProjectorError::InvalidProjection(format!("`(the {})` appears twice in query.", var)));
}
},
&Element::Aggregate(_) => {
},
&Element::Pull(_) => {
},
};
// Record variables -- `(the?x)` and `?x` are different in this regard, because we don't want
// to group on variables that are corresponding-projected.
match e {
&Element::Variable(ref var) => {
outer_variables.insert(var.clone());
},
&Element::Corresponding(ref var) => {
// We will project these later; don't put them in `outer_variables`
// so we know not to group them.
corresponded_variables.insert(var.clone());
},
&Element::Pull(Pull { ref var, patterns: _ }) => {
// We treat `pull` as an ordinary variable extraction,
// and we expand it later.
outer_variables.insert(var.clone());
},
&Element::Aggregate(_) => {
},
};
// Now do the main processing of each element.
match e {
// Each time we come across a variable, we push a SQL column
// into the SQL projection, aliased to the name of the variable,
// and we push an annotated index into the projector.
&Element::Variable(ref var) |
&Element::Corresponding(ref var) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
templates.push(TypedIndex::Known(i, tag));
i += 1; // We used one SQL column.
} else {
templates.push(TypedIndex::Unknown(i, i + 1));
i += 2; // We used two SQL columns.
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
outer_projection.push(Either::Left(type_name));
}
},
&Element::Pull(Pull { ref var, ref patterns }) => {
inner_variables.insert(var.clone());
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
outer_projection.push(Either::Left(projected_column.1.clone()));
inner_projection.push(projected_column);
if let Some(tag) = type_set.unique_type_tag() {
// We will have at least as many SQL columns as Datalog output columns.
// `i` tracks the former. The length of `templates` is the current latter.
// Projecting pull requires grabbing values, which we can do from the raw
// rows, and then populating the output, so we keep both column indices.
let output_index = templates.len();
assert!(output_index <= i as usize);
templates.push(TypedIndex::Known(i, tag));
pulls.push(PullTemplate {
indices: PullIndices {
sql_index: i,
output_index,
},
op: PullOperation((*patterns).clone()),
});
i += 1; // We used one SQL column.
} else {
// This should be impossible: (pull?x) implies that?x is a ref.
unreachable!();
}
},
&Element::Aggregate(ref a) => {
if let Some(simple) = a.to_simple() {
aggregates = true;
use query_projector_traits::aggregates::SimpleAggregationOp::*;
match simple.op {
Max | Min => {
min_max_count += 1;
},
Avg | Count | Sum => (),
}
// When we encounter a simple aggregate -- one in which the aggregation can be
// implemented in SQL, on a single variable -- we just push the SQL aggregation op.
// We must ensure the following:
// - There's a column for the var.
// - The type of the var is known to be restricted to a sensible input set
// (not necessarily a single type, but e.g., all vals must be Double or Long).
// - The type set must be appropriate for the operation. E.g., `Sum` is not a
// meaningful operation on instants.
let (projected_column, return_type) = projected_column_for_simple_aggregate(&simple, &query.cc)?;
outer_projection.push(Either::Right(projected_column));
if!inner_variables.contains(&simple.var) {
inner_variables.insert(simple.var.clone());
let (projected_column, _type_set) = projected_column_for_var(&simple.var, &query.cc)?;
inner_projection.push(projected_column);
if query.cc.known_type_set(&simple.var).unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &simple.var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
// We might regret using the type tag here instead of the `ValueType`.
templates.push(TypedIndex::Known(i, return_type.value_type_tag()));
i += 1;
} else {
// TODO: complex aggregates.
bail!(ProjectorError::NotYetImplemented("complex aggregates".into()));
}
},
}
}
match (min_max_count, corresponded_variables.len()) {
(0, 0) | (_, 0) => {},
(0, _) => {
bail!(ProjectorError::InvalidProjection("Warning: used `the` without `min` or `max`.".to_string()));
},
(1, _) => {
// This is the success case!
},
(n, c) => {
bail!(ProjectorError::AmbiguousAggregates(n, c));
},
}
// Anything used in ORDER BY (which we're given in `named_projection`)
// needs to be in the SQL column list so we can refer to it by name.
//
// They don't affect projection.
//
// If a variable is of a non-fixed type, also project the type tag column, so we don't
// accidentally unify across types when considering uniqueness!
for var in query.named_projection.iter() {
if outer_variables.contains(var) {
continue;
}
// If it's a fixed value, we need do nothing further.
if query.cc.is_value_bound(&var) {
continue;
}
let already_inner = inner_variables.contains(&var);
let (column, name) = candidate_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(column, name.clone()));
inner_variables.insert(var.clone());
}
outer_projection.push(Either::Left(name));
outer_variables.insert(var.clone());
// We don't care if a column has a single _type_, we care if it has a single type _tag_,
// because that's what we'll use if we're projecting. E.g., Long and Double.
// Single type implies single type tag, and is cheaper, so we check that first.
let types = query.cc.known_type_set(&var);
if!types.has_unique_type_tag() {
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
if!already_inner {
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
outer_projection.push(Either::Left(type_name));
}
}
if!aggregates {
// We're done -- we never need to group unless we're aggregating.
return Ok(ProjectedElements {
sql_projection: Projection::Columns(inner_projection),
pre_aggregate_projection: None,
templates,
pulls,
group_by: vec![],
});
}
// OK, on to aggregates.
// We need to produce two SQL projection lists: one for an inner query and one for the outer.
//
// The inner serves these purposes:
// - Projecting variables to avoid duplicates being elided. (:with)
// - Making bindings available to the outermost query for projection, ordering, and grouping.
//
// The outer is consumed by the projector.
//
// We will also be producing:
// - A GROUP BY list to group the output of the inner query by non-aggregate variables
// so that it can be correctly aggregated.
// Turn this collection of vars into a collection of columns from the query.
// We don't allow grouping on anything but a variable bound in the query.
// We group by tag if necessary.
let mut group_by = Vec::with_capacity(outer_variables.len() + 2);
let vars = outer_variables.into_iter().zip(::std::iter::repeat(true));
let corresponds = corresponded_variables.into_iter().zip(::std::iter::repeat(false));
for (var, group) in vars.chain(corresponds) {
if query.cc.is_value_bound(&var) {
continue;
}
if group {
// The GROUP BY goes outside, but it needs every variable and type tag to be
// projected from inside. Collect in both directions here.
let name = VariableColumn::Variable(var.clone()).column_name();
group_by.push(GroupBy::ProjectedColumn(name));
}
let needs_type_projection =!query.cc.known_type_set(&var).has_unique_type_tag();
let already_inner = inner_variables.contains(&var);
if!already_inner {
let (column, name) = candidate_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(column, name.clone()));
}
if needs_type_projection {
let type_name = VariableColumn::VariableTypeTag(var.clone()).column_name();
if!already_inner {
let type_col = query.cc
.extracted_types
.get(&var)
.cloned()
.ok_or_else(|| ProjectorError::NoTypeAvailableForVariable(var.name().clone()))?;
inner_projection.push(ProjectedColumn(ColumnOrExpression::Column(type_col), type_name.clone()));
}
if group {
group_by.push(GroupBy::ProjectedColumn(type_name));
}
};
}
for var in query.with.iter() {
// We never need to project a constant.
if query.cc.is_value_bound(&var) {
continue;
}
// We don't need to add inner projections for :with if they are already there.
if!inner_variables.contains(&var) {
let (projected_column, type_set) = projected_column_for_var(&var, &query.cc)?;
inner_projection.push(projected_column);
if type_set.unique_type_tag().is_none() {
// Also project the type from the SQL query.
let (type_column, type_name) = candidate_type_column(&query.cc, &var)?;
inner_projection.push(ProjectedColumn(type_column, type_name.clone()));
}
}
| let mut out = vec![];
::std::mem::swap(&mut out, &mut self.templates);
out
}
| identifier_body |
hmacsha512256.rs | //! `HMAC-SHA-512-256`, i.e., the first 256 bits of
//! `HMAC-SHA-512`. `HMAC-SHA-512-256` is conjectured to meet the standard notion
//! of unforgeability.
use ffi::{crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES};
auth_module!(crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES);
#[cfg(test)]
mod test {
use super::*;
#[test]
fn | () {
// corresponding to tests/auth.c from NaCl
// "Test Case 2" from RFC 4231
let key = Key([74, 101, 102, 101, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0]);
let c = [0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20
,0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20
,0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68
,0x69, 0x6e, 0x67, 0x3f];
let a_expected = [0x16,0x4b,0x7a,0x7b,0xfc,0xf8,0x19,0xe2
,0xe3,0x95,0xfb,0xe7,0x3b,0x56,0xe0,0xa3
,0x87,0xbd,0x64,0x22,0x2e,0x83,0x1f,0xd6
,0x10,0x27,0x0c,0xd7,0xea,0x25,0x05,0x54];
let Tag(a) = authenticate(&c, &key);
assert!(a == a_expected);
}
}
| test_vector_1 | identifier_name |
hmacsha512256.rs | //! `HMAC-SHA-512-256`, i.e., the first 256 bits of
//! `HMAC-SHA-512`. `HMAC-SHA-512-256` is conjectured to meet the standard notion
//! of unforgeability.
use ffi::{crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES};
auth_module!(crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES);
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_vector_1() |
}
| {
// corresponding to tests/auth.c from NaCl
// "Test Case 2" from RFC 4231
let key = Key([74, 101, 102, 101, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0]);
let c = [0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20
,0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20
,0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68
,0x69, 0x6e, 0x67, 0x3f];
let a_expected = [0x16,0x4b,0x7a,0x7b,0xfc,0xf8,0x19,0xe2
,0xe3,0x95,0xfb,0xe7,0x3b,0x56,0xe0,0xa3
,0x87,0xbd,0x64,0x22,0x2e,0x83,0x1f,0xd6
,0x10,0x27,0x0c,0xd7,0xea,0x25,0x05,0x54];
let Tag(a) = authenticate(&c, &key);
assert!(a == a_expected);
} | identifier_body |
hmacsha512256.rs | //! `HMAC-SHA-512`. `HMAC-SHA-512-256` is conjectured to meet the standard notion
//! of unforgeability.
use ffi::{crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES};
auth_module!(crypto_auth_hmacsha512256,
crypto_auth_hmacsha512256_verify,
crypto_auth_hmacsha512256_KEYBYTES,
crypto_auth_hmacsha512256_BYTES);
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_vector_1() {
// corresponding to tests/auth.c from NaCl
// "Test Case 2" from RFC 4231
let key = Key([74, 101, 102, 101, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0]);
let c = [0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20
,0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20
,0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68
,0x69, 0x6e, 0x67, 0x3f];
let a_expected = [0x16,0x4b,0x7a,0x7b,0xfc,0xf8,0x19,0xe2
,0xe3,0x95,0xfb,0xe7,0x3b,0x56,0xe0,0xa3
,0x87,0xbd,0x64,0x22,0x2e,0x83,0x1f,0xd6
,0x10,0x27,0x0c,0xd7,0xea,0x25,0x05,0x54];
let Tag(a) = authenticate(&c, &key);
assert!(a == a_expected);
}
} | //! `HMAC-SHA-512-256`, i.e., the first 256 bits of | random_line_split |
|
enum-nullable-simplifycfg-misopt.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
/*!
* This is a regression test for a bug in LLVM, fixed in upstream r179587,
* where the switch instructions generated for destructuring enums
* represented with nullable pointers could be misoptimized in some cases.
*/
enum List<X> { Nil, Cons(X, Box<List<X>>) }
pub fn main() {
match List::Cons(10, box List::Nil) {
List::Cons(10, _) => {}
List::Nil => {}
_ => panic!()
}
} | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | random_line_split |
enum-nullable-simplifycfg-misopt.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
/*!
* This is a regression test for a bug in LLVM, fixed in upstream r179587,
* where the switch instructions generated for destructuring enums
* represented with nullable pointers could be misoptimized in some cases.
*/
enum | <X> { Nil, Cons(X, Box<List<X>>) }
pub fn main() {
match List::Cons(10, box List::Nil) {
List::Cons(10, _) => {}
List::Nil => {}
_ => panic!()
}
}
| List | identifier_name |
enum-nullable-simplifycfg-misopt.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
/*!
* This is a regression test for a bug in LLVM, fixed in upstream r179587,
* where the switch instructions generated for destructuring enums
* represented with nullable pointers could be misoptimized in some cases.
*/
enum List<X> { Nil, Cons(X, Box<List<X>>) }
pub fn main() | {
match List::Cons(10, box List::Nil) {
List::Cons(10, _) => {}
List::Nil => {}
_ => panic!()
}
} | identifier_body |
|
literals.rs | // https://rustbyexample.com/cast/literals.html
// http://rust-lang-ja.org/rust-by-example/cast/literals.html
fn main() {
// Suffixed literals, their types are known at initialization
let x = 1u8;
let y = 2u32;
let z = 3f32;
// Unsuffixed literal, their types depend on how they are used
let i = 1;
let f = 1.0;
// `size_of_val` returns the size of a variable in bytes
println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
println!("size of `z` in bytes: {}", std::mem::size_of_val(&z)); | println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
} | random_line_split |
|
literals.rs | // https://rustbyexample.com/cast/literals.html
// http://rust-lang-ja.org/rust-by-example/cast/literals.html
fn | () {
// Suffixed literals, their types are known at initialization
let x = 1u8;
let y = 2u32;
let z = 3f32;
// Unsuffixed literal, their types depend on how they are used
let i = 1;
let f = 1.0;
// `size_of_val` returns the size of a variable in bytes
println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
println!("size of `z` in bytes: {}", std::mem::size_of_val(&z));
println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
}
| main | identifier_name |
literals.rs | // https://rustbyexample.com/cast/literals.html
// http://rust-lang-ja.org/rust-by-example/cast/literals.html
fn main() | {
// Suffixed literals, their types are known at initialization
let x = 1u8;
let y = 2u32;
let z = 3f32;
// Unsuffixed literal, their types depend on how they are used
let i = 1;
let f = 1.0;
// `size_of_val` returns the size of a variable in bytes
println!("size of `x` in bytes: {}", std::mem::size_of_val(&x));
println!("size of `y` in bytes: {}", std::mem::size_of_val(&y));
println!("size of `z` in bytes: {}", std::mem::size_of_val(&z));
println!("size of `i` in bytes: {}", std::mem::size_of_val(&i));
println!("size of `f` in bytes: {}", std::mem::size_of_val(&f));
} | identifier_body |
|
macro_import.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by `rustc` when loading a crate with exported macros.
use session::Session;
use metadata::creader::CrateReader;
use std::collections::{HashSet, HashMap};
use syntax::ast;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
use syntax::attr::AttrMetaMethods;
struct MacroLoader<'a> {
sess: &'a Session,
span_whitelist: HashSet<Span>,
reader: CrateReader<'a>,
macros: Vec<ast::MacroDef>,
}
impl<'a> MacroLoader<'a> {
fn new(sess: &'a Session) -> MacroLoader<'a> {
MacroLoader {
sess: sess,
span_whitelist: HashSet::new(),
reader: CrateReader::new(sess),
macros: vec![],
}
}
}
/// Read exported macros.
pub fn read_macro_defs(sess: &Session, krate: &ast::Crate) -> Vec<ast::MacroDef> {
let mut loader = MacroLoader::new(sess);
// We need to error on `#[macro_use] extern crate` when it isn't at the
// crate root, because `$crate` won't work properly. Identify these by
// spans, because the crate map isn't set up yet.
for item in &krate.module.items {
if let ast::ItemExternCrate(_) = item.node {
loader.span_whitelist.insert(item.span);
}
}
visit::walk_crate(&mut loader, krate);
loader.macros
}
pub type MacroSelection = HashMap<token::InternedString, Span>;
// note that macros aren't expanded yet, and therefore macros can't add macro imports.
impl<'a, 'v> Visitor<'v> for MacroLoader<'a> {
fn visit_item(&mut self, item: &ast::Item) {
// We're only interested in `extern crate`.
match item.node {
ast::ItemExternCrate(_) => {}
_ => {
visit::walk_item(self, item);
return;
}
}
// Parse the attributes relating to macros.
let mut import = Some(HashMap::new()); // None => load all
let mut reexport = HashMap::new();
for attr in &item.attrs {
let mut used = true;
match &attr.name()[] {
"phase" => {
self.sess.span_err(attr.span, "#[phase] is deprecated");
}
"plugin" => {
self.sess.span_err(attr.span, "#[plugin] on `extern crate` is deprecated");
self.sess.span_help(attr.span, &format!("use a crate attribute instead, \
i.e. #![plugin({})]",
item.ident.as_str())[]);
}
"macro_use" => {
let names = attr.meta_item_list();
if names.is_none() {
// no names => load all
import = None;
}
if let (Some(sel), Some(names)) = (import.as_mut(), names) {
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
sel.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro import");
}
}
}
}
"macro_reexport" => {
let names = match attr.meta_item_list() {
Some(names) => names,
None => {
self.sess.span_err(attr.span, "bad macro reexport");
continue;
}
};
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
reexport.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro reexport");
}
}
}
_ => used = false,
}
if used {
attr::mark_used(attr);
}
}
self.load_macros(item, import, reexport)
}
fn visit_mac(&mut self, _: &ast::Mac) |
}
impl<'a> MacroLoader<'a> {
fn load_macros<'b>(&mut self,
vi: &ast::Item,
import: Option<MacroSelection>,
reexport: MacroSelection) {
if let Some(sel) = import.as_ref() {
if sel.is_empty() && reexport.is_empty() {
return;
}
}
if!self.span_whitelist.contains(&vi.span) {
self.sess.span_err(vi.span, "an `extern crate` loading macros must be at \
the crate root");
return;
}
let macros = self.reader.read_exported_macros(vi);
let mut seen = HashSet::new();
for mut def in macros {
let name = token::get_ident(def.ident);
seen.insert(name.clone());
def.use_locally = match import.as_ref() {
None => true,
Some(sel) => sel.contains_key(&name),
};
def.export = reexport.contains_key(&name);
self.macros.push(def);
}
if let Some(sel) = import.as_ref() {
for (name, span) in sel.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "imported macro not found");
}
}
}
for (name, span) in reexport.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "reexported macro not found");
}
}
}
}
| {
// bummer... can't see macro imports inside macros.
// do nothing.
} | identifier_body |
macro_import.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by `rustc` when loading a crate with exported macros.
use session::Session;
use metadata::creader::CrateReader;
use std::collections::{HashSet, HashMap};
use syntax::ast;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
use syntax::attr::AttrMetaMethods;
struct MacroLoader<'a> {
sess: &'a Session,
span_whitelist: HashSet<Span>,
reader: CrateReader<'a>,
macros: Vec<ast::MacroDef>,
}
impl<'a> MacroLoader<'a> {
fn new(sess: &'a Session) -> MacroLoader<'a> {
MacroLoader {
sess: sess,
span_whitelist: HashSet::new(),
reader: CrateReader::new(sess),
macros: vec![],
}
}
}
/// Read exported macros.
pub fn read_macro_defs(sess: &Session, krate: &ast::Crate) -> Vec<ast::MacroDef> {
let mut loader = MacroLoader::new(sess);
// We need to error on `#[macro_use] extern crate` when it isn't at the
// crate root, because `$crate` won't work properly. Identify these by
// spans, because the crate map isn't set up yet.
for item in &krate.module.items {
if let ast::ItemExternCrate(_) = item.node {
loader.span_whitelist.insert(item.span);
}
}
visit::walk_crate(&mut loader, krate);
loader.macros
}
pub type MacroSelection = HashMap<token::InternedString, Span>;
// note that macros aren't expanded yet, and therefore macros can't add macro imports.
impl<'a, 'v> Visitor<'v> for MacroLoader<'a> {
fn visit_item(&mut self, item: &ast::Item) {
// We're only interested in `extern crate`.
match item.node {
ast::ItemExternCrate(_) => {}
_ => {
visit::walk_item(self, item);
return;
}
}
// Parse the attributes relating to macros.
let mut import = Some(HashMap::new()); // None => load all
let mut reexport = HashMap::new();
for attr in &item.attrs {
let mut used = true;
match &attr.name()[] {
"phase" => {
self.sess.span_err(attr.span, "#[phase] is deprecated");
}
"plugin" => {
self.sess.span_err(attr.span, "#[plugin] on `extern crate` is deprecated");
self.sess.span_help(attr.span, &format!("use a crate attribute instead, \
i.e. #![plugin({})]",
item.ident.as_str())[]);
}
"macro_use" => {
let names = attr.meta_item_list();
if names.is_none() {
// no names => load all
import = None;
}
if let (Some(sel), Some(names)) = (import.as_mut(), names) {
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
sel.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro import");
}
}
}
}
"macro_reexport" => {
let names = match attr.meta_item_list() {
Some(names) => names,
None => {
self.sess.span_err(attr.span, "bad macro reexport");
continue;
}
};
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
reexport.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro reexport");
}
}
}
_ => used = false,
}
if used {
attr::mark_used(attr);
}
}
self.load_macros(item, import, reexport)
}
fn visit_mac(&mut self, _: &ast::Mac) {
// bummer... can't see macro imports inside macros.
// do nothing.
}
}
impl<'a> MacroLoader<'a> {
fn load_macros<'b>(&mut self,
vi: &ast::Item,
import: Option<MacroSelection>,
reexport: MacroSelection) {
if let Some(sel) = import.as_ref() {
if sel.is_empty() && reexport.is_empty() |
}
if!self.span_whitelist.contains(&vi.span) {
self.sess.span_err(vi.span, "an `extern crate` loading macros must be at \
the crate root");
return;
}
let macros = self.reader.read_exported_macros(vi);
let mut seen = HashSet::new();
for mut def in macros {
let name = token::get_ident(def.ident);
seen.insert(name.clone());
def.use_locally = match import.as_ref() {
None => true,
Some(sel) => sel.contains_key(&name),
};
def.export = reexport.contains_key(&name);
self.macros.push(def);
}
if let Some(sel) = import.as_ref() {
for (name, span) in sel.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "imported macro not found");
}
}
}
for (name, span) in reexport.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "reexported macro not found");
}
}
}
}
| {
return;
} | conditional_block |
macro_import.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by `rustc` when loading a crate with exported macros.
use session::Session;
use metadata::creader::CrateReader;
use std::collections::{HashSet, HashMap};
use syntax::ast;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
use syntax::attr::AttrMetaMethods;
struct MacroLoader<'a> {
sess: &'a Session,
span_whitelist: HashSet<Span>,
reader: CrateReader<'a>,
macros: Vec<ast::MacroDef>,
}
impl<'a> MacroLoader<'a> {
fn | (sess: &'a Session) -> MacroLoader<'a> {
MacroLoader {
sess: sess,
span_whitelist: HashSet::new(),
reader: CrateReader::new(sess),
macros: vec![],
}
}
}
/// Read exported macros.
pub fn read_macro_defs(sess: &Session, krate: &ast::Crate) -> Vec<ast::MacroDef> {
let mut loader = MacroLoader::new(sess);
// We need to error on `#[macro_use] extern crate` when it isn't at the
// crate root, because `$crate` won't work properly. Identify these by
// spans, because the crate map isn't set up yet.
for item in &krate.module.items {
if let ast::ItemExternCrate(_) = item.node {
loader.span_whitelist.insert(item.span);
}
}
visit::walk_crate(&mut loader, krate);
loader.macros
}
pub type MacroSelection = HashMap<token::InternedString, Span>;
// note that macros aren't expanded yet, and therefore macros can't add macro imports.
impl<'a, 'v> Visitor<'v> for MacroLoader<'a> {
fn visit_item(&mut self, item: &ast::Item) {
// We're only interested in `extern crate`.
match item.node {
ast::ItemExternCrate(_) => {}
_ => {
visit::walk_item(self, item);
return;
}
}
// Parse the attributes relating to macros.
let mut import = Some(HashMap::new()); // None => load all
let mut reexport = HashMap::new();
for attr in &item.attrs {
let mut used = true;
match &attr.name()[] {
"phase" => {
self.sess.span_err(attr.span, "#[phase] is deprecated");
}
"plugin" => {
self.sess.span_err(attr.span, "#[plugin] on `extern crate` is deprecated");
self.sess.span_help(attr.span, &format!("use a crate attribute instead, \
i.e. #![plugin({})]",
item.ident.as_str())[]);
}
"macro_use" => {
let names = attr.meta_item_list();
if names.is_none() {
// no names => load all
import = None;
}
if let (Some(sel), Some(names)) = (import.as_mut(), names) {
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
sel.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro import");
}
}
}
}
"macro_reexport" => {
let names = match attr.meta_item_list() {
Some(names) => names,
None => {
self.sess.span_err(attr.span, "bad macro reexport");
continue;
}
};
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
reexport.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro reexport");
}
}
}
_ => used = false,
}
if used {
attr::mark_used(attr);
}
}
self.load_macros(item, import, reexport)
}
fn visit_mac(&mut self, _: &ast::Mac) {
// bummer... can't see macro imports inside macros.
// do nothing.
}
}
impl<'a> MacroLoader<'a> {
fn load_macros<'b>(&mut self,
vi: &ast::Item,
import: Option<MacroSelection>,
reexport: MacroSelection) {
if let Some(sel) = import.as_ref() {
if sel.is_empty() && reexport.is_empty() {
return;
}
}
if!self.span_whitelist.contains(&vi.span) {
self.sess.span_err(vi.span, "an `extern crate` loading macros must be at \
the crate root");
return;
}
let macros = self.reader.read_exported_macros(vi);
let mut seen = HashSet::new();
for mut def in macros {
let name = token::get_ident(def.ident);
seen.insert(name.clone());
def.use_locally = match import.as_ref() {
None => true,
Some(sel) => sel.contains_key(&name),
};
def.export = reexport.contains_key(&name);
self.macros.push(def);
}
if let Some(sel) = import.as_ref() {
for (name, span) in sel.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "imported macro not found");
}
}
}
for (name, span) in reexport.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "reexported macro not found");
}
}
}
}
| new | identifier_name |
macro_import.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by `rustc` when loading a crate with exported macros.
use session::Session;
use metadata::creader::CrateReader;
use std::collections::{HashSet, HashMap};
use syntax::ast;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
use syntax::attr::AttrMetaMethods;
struct MacroLoader<'a> {
sess: &'a Session,
span_whitelist: HashSet<Span>,
reader: CrateReader<'a>,
macros: Vec<ast::MacroDef>,
}
impl<'a> MacroLoader<'a> {
fn new(sess: &'a Session) -> MacroLoader<'a> {
MacroLoader {
sess: sess,
span_whitelist: HashSet::new(),
reader: CrateReader::new(sess),
macros: vec![],
}
}
}
/// Read exported macros.
pub fn read_macro_defs(sess: &Session, krate: &ast::Crate) -> Vec<ast::MacroDef> {
let mut loader = MacroLoader::new(sess);
// We need to error on `#[macro_use] extern crate` when it isn't at the
// crate root, because `$crate` won't work properly. Identify these by
// spans, because the crate map isn't set up yet.
for item in &krate.module.items {
if let ast::ItemExternCrate(_) = item.node {
loader.span_whitelist.insert(item.span);
}
}
visit::walk_crate(&mut loader, krate);
loader.macros
}
pub type MacroSelection = HashMap<token::InternedString, Span>;
// note that macros aren't expanded yet, and therefore macros can't add macro imports.
impl<'a, 'v> Visitor<'v> for MacroLoader<'a> {
fn visit_item(&mut self, item: &ast::Item) {
// We're only interested in `extern crate`.
match item.node {
ast::ItemExternCrate(_) => {}
_ => {
visit::walk_item(self, item);
return;
}
}
// Parse the attributes relating to macros.
let mut import = Some(HashMap::new()); // None => load all
let mut reexport = HashMap::new();
for attr in &item.attrs {
let mut used = true;
match &attr.name()[] {
"phase" => {
self.sess.span_err(attr.span, "#[phase] is deprecated");
}
"plugin" => {
self.sess.span_err(attr.span, "#[plugin] on `extern crate` is deprecated");
self.sess.span_help(attr.span, &format!("use a crate attribute instead, \
i.e. #![plugin({})]",
item.ident.as_str())[]);
}
"macro_use" => {
let names = attr.meta_item_list();
if names.is_none() {
// no names => load all
import = None;
}
if let (Some(sel), Some(names)) = (import.as_mut(), names) {
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
sel.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro import");
}
}
}
}
"macro_reexport" => {
let names = match attr.meta_item_list() {
Some(names) => names,
None => {
self.sess.span_err(attr.span, "bad macro reexport");
continue;
}
};
for attr in names {
if let ast::MetaWord(ref name) = attr.node {
reexport.insert(name.clone(), attr.span);
} else {
self.sess.span_err(attr.span, "bad macro reexport");
}
}
}
_ => used = false,
}
if used {
attr::mark_used(attr);
}
}
self.load_macros(item, import, reexport)
}
fn visit_mac(&mut self, _: &ast::Mac) {
// bummer... can't see macro imports inside macros.
// do nothing.
}
}
impl<'a> MacroLoader<'a> {
fn load_macros<'b>(&mut self,
vi: &ast::Item,
import: Option<MacroSelection>,
reexport: MacroSelection) {
if let Some(sel) = import.as_ref() {
if sel.is_empty() && reexport.is_empty() {
return;
}
}
if!self.span_whitelist.contains(&vi.span) {
self.sess.span_err(vi.span, "an `extern crate` loading macros must be at \
the crate root");
return;
}
let macros = self.reader.read_exported_macros(vi);
let mut seen = HashSet::new();
for mut def in macros {
let name = token::get_ident(def.ident);
seen.insert(name.clone());
def.use_locally = match import.as_ref() {
None => true,
Some(sel) => sel.contains_key(&name),
};
def.export = reexport.contains_key(&name);
self.macros.push(def);
}
if let Some(sel) = import.as_ref() {
for (name, span) in sel.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "imported macro not found");
}
}
}
for (name, span) in reexport.iter() {
if!seen.contains(name) {
self.sess.span_err(*span, "reexported macro not found");
}
}
}
} | // file at the top-level directory of this distribution and at | random_line_split |
binary_tree.rs | // Copyright (c) 2015 Takeru Ohta <[email protected]>
//
// This software is released under the MIT License,
// see the LICENSE file at the top-level directory.
extern crate dawg;
use dawg::binary_tree::Builder;
#[test]
fn build() |
#[test]
fn search_common_prefix() {
let trie = words()
.iter()
.fold(Builder::new(), |mut b, w| {
b.insert(w.bytes()).ok().unwrap();
b
})
.finish();
assert_eq!(0, trie.search_common_prefix("hoge".bytes()).count());
assert_eq!(vec![(0, 3)],
trie.search_common_prefix("abc".bytes()).collect::<Vec<_>>());
assert_eq!(vec![(4, 2), (5, 4)],
trie.search_common_prefix("cddrr".bytes()).collect::<Vec<_>>());
}
fn words() -> [&'static str; 7] {
["abc", "b", "bbb", "car", "cd", "cddr", "cdr"]
}
| {
let mut b = Builder::new();
for w in words().iter() {
assert!(b.insert(w.bytes()).is_ok());
}
assert_eq!(words().len(), b.finish().len());
} | identifier_body |
binary_tree.rs | // Copyright (c) 2015 Takeru Ohta <[email protected]>
//
// This software is released under the MIT License,
// see the LICENSE file at the top-level directory.
extern crate dawg;
use dawg::binary_tree::Builder; | let mut b = Builder::new();
for w in words().iter() {
assert!(b.insert(w.bytes()).is_ok());
}
assert_eq!(words().len(), b.finish().len());
}
#[test]
fn search_common_prefix() {
let trie = words()
.iter()
.fold(Builder::new(), |mut b, w| {
b.insert(w.bytes()).ok().unwrap();
b
})
.finish();
assert_eq!(0, trie.search_common_prefix("hoge".bytes()).count());
assert_eq!(vec![(0, 3)],
trie.search_common_prefix("abc".bytes()).collect::<Vec<_>>());
assert_eq!(vec![(4, 2), (5, 4)],
trie.search_common_prefix("cddrr".bytes()).collect::<Vec<_>>());
}
fn words() -> [&'static str; 7] {
["abc", "b", "bbb", "car", "cd", "cddr", "cdr"]
} |
#[test]
fn build() { | random_line_split |
binary_tree.rs | // Copyright (c) 2015 Takeru Ohta <[email protected]>
//
// This software is released under the MIT License,
// see the LICENSE file at the top-level directory.
extern crate dawg;
use dawg::binary_tree::Builder;
#[test]
fn | () {
let mut b = Builder::new();
for w in words().iter() {
assert!(b.insert(w.bytes()).is_ok());
}
assert_eq!(words().len(), b.finish().len());
}
#[test]
fn search_common_prefix() {
let trie = words()
.iter()
.fold(Builder::new(), |mut b, w| {
b.insert(w.bytes()).ok().unwrap();
b
})
.finish();
assert_eq!(0, trie.search_common_prefix("hoge".bytes()).count());
assert_eq!(vec![(0, 3)],
trie.search_common_prefix("abc".bytes()).collect::<Vec<_>>());
assert_eq!(vec![(4, 2), (5, 4)],
trie.search_common_prefix("cddrr".bytes()).collect::<Vec<_>>());
}
fn words() -> [&'static str; 7] {
["abc", "b", "bbb", "car", "cd", "cddr", "cdr"]
}
| build | identifier_name |
euclidext.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use euclid::default::{Rect, Size2D};
pub trait Size2DExt {
fn to_u64(&self) -> Size2D<u64>;
}
impl Size2DExt for Size2D<f32> {
fn to_u64(&self) -> Size2D<u64> |
}
impl Size2DExt for Size2D<f64> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
impl Size2DExt for Size2D<u32> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
pub trait RectExt {
fn to_u64(&self) -> Rect<u64>;
}
impl RectExt for Rect<f64> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
}
impl RectExt for Rect<u32> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
}
| {
self.cast()
} | identifier_body |
euclidext.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
|
impl Size2DExt for Size2D<f32> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
impl Size2DExt for Size2D<f64> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
impl Size2DExt for Size2D<u32> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
pub trait RectExt {
fn to_u64(&self) -> Rect<u64>;
}
impl RectExt for Rect<f64> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
}
impl RectExt for Rect<u32> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
} | use euclid::default::{Rect, Size2D};
pub trait Size2DExt {
fn to_u64(&self) -> Size2D<u64>;
} | random_line_split |
euclidext.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use euclid::default::{Rect, Size2D};
pub trait Size2DExt {
fn to_u64(&self) -> Size2D<u64>;
}
impl Size2DExt for Size2D<f32> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
impl Size2DExt for Size2D<f64> {
fn | (&self) -> Size2D<u64> {
self.cast()
}
}
impl Size2DExt for Size2D<u32> {
fn to_u64(&self) -> Size2D<u64> {
self.cast()
}
}
pub trait RectExt {
fn to_u64(&self) -> Rect<u64>;
}
impl RectExt for Rect<f64> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
}
impl RectExt for Rect<u32> {
fn to_u64(&self) -> Rect<u64> {
self.cast()
}
}
| to_u64 | identifier_name |
active_object.rs | // Implements http://rosettacode.org/wiki/Active_object
#![feature(std_misc)]
extern crate time;
extern crate num;
extern crate schedule_recv;
use num::traits::Zero;
use num::Float;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};
use std::time::duration::Duration;
use std::thread::{self, spawn};
use std::sync::mpsc::{channel, Sender, SendError};
use std::ops::Mul;
use schedule_recv::periodic_ms; |
// Rust supports both shared-memory and actor models of concurrency, and the Integrator utilizes
// both. We use a Sender (actor model) to send the Integrator new functions, while we use a Mutex
// (shared-memory concurrency) to hold the result of the integration.
//
// Note that these are not the only options here--there are many, many ways you can deal with
// concurrent access. But when in doubt, a plain old Mutex is often a good bet. For example, this
// might look like a good situation for a RwLock--after all, there's no reason for a read in the
// main task to block writes. Unfortunately, unless you have significantly more reads than
// writes (which is certainly not the case here), a Mutex will usually outperform a RwLock.
pub struct Integrator<S:'static, T: Send> {
input: Sender<Box<Fn(u32) -> S + Send>>,
output: Arc<Mutex<T>>,
}
// In Rust, time durations are strongly typed. This is usually exactly what you want, but for a
// problem like this--where the integrated value has unusual (unspecified?) units--it can actually
// be a bit tricky. Right now, Durations can only be multiplied or divided by i32s, so in order to
// be able to actually do math with them we say that the type parameter S (the result of the
// function being integrated) must yield T (the type of the integrated value) when multiplied by
// f64. We could possibly replace f64 with a generic as well, but it would make things a bit more
// complex.
impl<S: Mul<f64, Output=T> + Float + Zero,
T:'static + Clone + Send + Float> Integrator<S, T> {
pub fn new(frequency: u32) -> Integrator<S, T> {
// We create a pipe allowing functions to be sent from tx (the sending end) to input (the
// receiving end). In order to change the function we are integrating from the task in
// which the Integrator lives, we simply send the function through tx.
let (tx, input) = channel();
// The easiest way to do shared-memory concurrency in Rust is to use atomic reference
// counting, or Arc, around a synchronized type (like Mutex<T>). Arc gives you a guarantee
// that memory will not be freed as long as there is at least one reference to it.
// It is similar to C++'s shared_ptr, but it is guaranteed to be safe and is never
// incremented unless explicitly cloned (by default, it is moved).
let s: Arc<Mutex<T>> = Arc::new(Mutex::new(Zero::zero()));
let integrator = Integrator {
input: tx,
// Here is the aforementioned clone. We have to do it before s enters the closure,
// because once that happens it is moved into the closure (and later, the new task) and
// becomes inaccessible to the outside world.
output: s.clone(),
};
spawn(move || -> () {
// The frequency is how often we want to "tick" as we update our integrated total. In
// Rust, timers can yield Receivers that are periodically notified with an empty
// message (where the period is the frequency). This is useful because it lets us wait
// on either a tick or another type of message (in this case, a request to change the
// function we are integrating).
let periodic = periodic_ms(frequency);
let mut t = 0;
let mut k: Box<Fn(u32) -> S + Send> = Box::new(|_| Zero::zero());
let mut k_0: S = Zero::zero();
loop {
// Here's the selection we talked about above. Note that we are careful to call
// the *non*-failing function, recv(), here. The reason we do this is because
// recv() will return Err when the sending end of a channel is dropped. While
// this is unlikely to happen for the timer (so again, you could argue for failure
// there), it's normal behavior for the sending end of input to be dropped, since
// it just happens when the Integrator falls out of scope. So we handle it cleanly
// and break out of the loop, rather than failing.
select! {
res = periodic.recv() => match res {
Ok(_) => {
t += frequency;
let k_1: S = k(t);
// Rust Mutexes are a bit different from Mutexes in many other
// languages, in that the protected data is actually encapsulated by
// the Mutex. The reason for this is that Rust is actually capable of
// enforcing (via its borrow checker) the invariant that the contents
// of a Mutex may only be read when you have acquired its lock. This
// is enforced by way of a MutexGuard, the return value of lock(),
// which implements some special traits (Deref and DerefMut) that allow
// access to the inner element "through" the guard. The element so
// acquired has a lifetime bounded by that of the MutexGuard, the
// MutexGuard can only be acquired by taking a lock, and the only way
// to release the lock is by letting the MutexGuard fall out of scope,
// so it's impossible to access the data incorrectly. There are some
// additional subtleties around the actual implementation, but that's
// the basic idea.
let mut s = s.lock().unwrap();
*s = *s + (k_1 + k_0) * (frequency as f64 / 2.);
k_0 = k_1;
}
Err(_) => break,
},
res = input.recv() => match res {
Ok(k_new) => k = k_new,
Err(_) => break,
}
}
}
});
integrator
}
pub fn input(&self, k: Box<Fn(u32) -> S + Send>) ->
Result<(), SendError<Box<Fn(u32) -> S + Send>>> {
// The meat of the work is done in the other thread, so to set the
// input we just send along the Sender we set earlier...
self.input.send(k)
}
pub fn output(&self) -> T {
//...and to read the input, we simply acquire a lock on the output Mutex and return a
// clone. Why do we have to clone it? Because, as mentioned above, Rust won't let us
// retain access to the interior of the Mutex unless we have possession of its lock. There
// are ways and circumstances in which one can avoid this (e.g. by using atomic types) but
// clone() is a perfectly reasonable solution as well, and a lot easier to reason about :)
self.output.lock().unwrap().clone()
}
}
// This function is fairly straightforward. We create the integrator, set its input function k(t)
// to 2pi * f * t, and then wait as described in the Rosetta stone problem.
#[cfg(not(test))]
fn integrate() -> f64 {
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / Duration::seconds(2).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(2000);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(500);
object.output()
}
#[cfg(not(test))]
fn main() {
println!("{}", integrate());
}
#[test]
//#[ignore] // Will fail on a heavily loaded machine
fn solution() {
// We should just be able to call integrate, but can't represent the closure properly due to
// rust-lang/rust issue #17060 if we make frequency or period a variable.
// FIXME(pythonesque): When unboxed closures are fixed, fix integrate() to take two arguments.
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / (Duration::seconds(2) / 10).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(200);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(100);
assert_eq!(object.output() as u32, 0)
} | random_line_split |
|
active_object.rs | // Implements http://rosettacode.org/wiki/Active_object
#![feature(std_misc)]
extern crate time;
extern crate num;
extern crate schedule_recv;
use num::traits::Zero;
use num::Float;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};
use std::time::duration::Duration;
use std::thread::{self, spawn};
use std::sync::mpsc::{channel, Sender, SendError};
use std::ops::Mul;
use schedule_recv::periodic_ms;
// Rust supports both shared-memory and actor models of concurrency, and the Integrator utilizes
// both. We use a Sender (actor model) to send the Integrator new functions, while we use a Mutex
// (shared-memory concurrency) to hold the result of the integration.
//
// Note that these are not the only options here--there are many, many ways you can deal with
// concurrent access. But when in doubt, a plain old Mutex is often a good bet. For example, this
// might look like a good situation for a RwLock--after all, there's no reason for a read in the
// main task to block writes. Unfortunately, unless you have significantly more reads than
// writes (which is certainly not the case here), a Mutex will usually outperform a RwLock.
pub struct Integrator<S:'static, T: Send> {
input: Sender<Box<Fn(u32) -> S + Send>>,
output: Arc<Mutex<T>>,
}
// In Rust, time durations are strongly typed. This is usually exactly what you want, but for a
// problem like this--where the integrated value has unusual (unspecified?) units--it can actually
// be a bit tricky. Right now, Durations can only be multiplied or divided by i32s, so in order to
// be able to actually do math with them we say that the type parameter S (the result of the
// function being integrated) must yield T (the type of the integrated value) when multiplied by
// f64. We could possibly replace f64 with a generic as well, but it would make things a bit more
// complex.
impl<S: Mul<f64, Output=T> + Float + Zero,
T:'static + Clone + Send + Float> Integrator<S, T> {
pub fn new(frequency: u32) -> Integrator<S, T> | // Rust, timers can yield Receivers that are periodically notified with an empty
// message (where the period is the frequency). This is useful because it lets us wait
// on either a tick or another type of message (in this case, a request to change the
// function we are integrating).
let periodic = periodic_ms(frequency);
let mut t = 0;
let mut k: Box<Fn(u32) -> S + Send> = Box::new(|_| Zero::zero());
let mut k_0: S = Zero::zero();
loop {
// Here's the selection we talked about above. Note that we are careful to call
// the *non*-failing function, recv(), here. The reason we do this is because
// recv() will return Err when the sending end of a channel is dropped. While
// this is unlikely to happen for the timer (so again, you could argue for failure
// there), it's normal behavior for the sending end of input to be dropped, since
// it just happens when the Integrator falls out of scope. So we handle it cleanly
// and break out of the loop, rather than failing.
select! {
res = periodic.recv() => match res {
Ok(_) => {
t += frequency;
let k_1: S = k(t);
// Rust Mutexes are a bit different from Mutexes in many other
// languages, in that the protected data is actually encapsulated by
// the Mutex. The reason for this is that Rust is actually capable of
// enforcing (via its borrow checker) the invariant that the contents
// of a Mutex may only be read when you have acquired its lock. This
// is enforced by way of a MutexGuard, the return value of lock(),
// which implements some special traits (Deref and DerefMut) that allow
// access to the inner element "through" the guard. The element so
// acquired has a lifetime bounded by that of the MutexGuard, the
// MutexGuard can only be acquired by taking a lock, and the only way
// to release the lock is by letting the MutexGuard fall out of scope,
// so it's impossible to access the data incorrectly. There are some
// additional subtleties around the actual implementation, but that's
// the basic idea.
let mut s = s.lock().unwrap();
*s = *s + (k_1 + k_0) * (frequency as f64 / 2.);
k_0 = k_1;
}
Err(_) => break,
},
res = input.recv() => match res {
Ok(k_new) => k = k_new,
Err(_) => break,
}
}
}
});
integrator
}
pub fn input(&self, k: Box<Fn(u32) -> S + Send>) ->
Result<(), SendError<Box<Fn(u32) -> S + Send>>> {
// The meat of the work is done in the other thread, so to set the
// input we just send along the Sender we set earlier...
self.input.send(k)
}
pub fn output(&self) -> T {
//...and to read the input, we simply acquire a lock on the output Mutex and return a
// clone. Why do we have to clone it? Because, as mentioned above, Rust won't let us
// retain access to the interior of the Mutex unless we have possession of its lock. There
// are ways and circumstances in which one can avoid this (e.g. by using atomic types) but
// clone() is a perfectly reasonable solution as well, and a lot easier to reason about :)
self.output.lock().unwrap().clone()
}
}
// This function is fairly straightforward. We create the integrator, set its input function k(t)
// to 2pi * f * t, and then wait as described in the Rosetta stone problem.
#[cfg(not(test))]
fn integrate() -> f64 {
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / Duration::seconds(2).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(2000);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(500);
object.output()
}
#[cfg(not(test))]
fn main() {
println!("{}", integrate());
}
#[test]
//#[ignore] // Will fail on a heavily loaded machine
fn solution() {
// We should just be able to call integrate, but can't represent the closure properly due to
// rust-lang/rust issue #17060 if we make frequency or period a variable.
// FIXME(pythonesque): When unboxed closures are fixed, fix integrate() to take two arguments.
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / (Duration::seconds(2) / 10).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(200);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(100);
assert_eq!(object.output() as u32, 0)
}
| {
// We create a pipe allowing functions to be sent from tx (the sending end) to input (the
// receiving end). In order to change the function we are integrating from the task in
// which the Integrator lives, we simply send the function through tx.
let (tx, input) = channel();
// The easiest way to do shared-memory concurrency in Rust is to use atomic reference
// counting, or Arc, around a synchronized type (like Mutex<T>). Arc gives you a guarantee
// that memory will not be freed as long as there is at least one reference to it.
// It is similar to C++'s shared_ptr, but it is guaranteed to be safe and is never
// incremented unless explicitly cloned (by default, it is moved).
let s: Arc<Mutex<T>> = Arc::new(Mutex::new(Zero::zero()));
let integrator = Integrator {
input: tx,
// Here is the aforementioned clone. We have to do it before s enters the closure,
// because once that happens it is moved into the closure (and later, the new task) and
// becomes inaccessible to the outside world.
output: s.clone(),
};
spawn(move || -> () {
// The frequency is how often we want to "tick" as we update our integrated total. In | identifier_body |
active_object.rs | // Implements http://rosettacode.org/wiki/Active_object
#![feature(std_misc)]
extern crate time;
extern crate num;
extern crate schedule_recv;
use num::traits::Zero;
use num::Float;
use std::f64::consts::PI;
use std::sync::{Arc, Mutex};
use std::time::duration::Duration;
use std::thread::{self, spawn};
use std::sync::mpsc::{channel, Sender, SendError};
use std::ops::Mul;
use schedule_recv::periodic_ms;
// Rust supports both shared-memory and actor models of concurrency, and the Integrator utilizes
// both. We use a Sender (actor model) to send the Integrator new functions, while we use a Mutex
// (shared-memory concurrency) to hold the result of the integration.
//
// Note that these are not the only options here--there are many, many ways you can deal with
// concurrent access. But when in doubt, a plain old Mutex is often a good bet. For example, this
// might look like a good situation for a RwLock--after all, there's no reason for a read in the
// main task to block writes. Unfortunately, unless you have significantly more reads than
// writes (which is certainly not the case here), a Mutex will usually outperform a RwLock.
pub struct Integrator<S:'static, T: Send> {
input: Sender<Box<Fn(u32) -> S + Send>>,
output: Arc<Mutex<T>>,
}
// In Rust, time durations are strongly typed. This is usually exactly what you want, but for a
// problem like this--where the integrated value has unusual (unspecified?) units--it can actually
// be a bit tricky. Right now, Durations can only be multiplied or divided by i32s, so in order to
// be able to actually do math with them we say that the type parameter S (the result of the
// function being integrated) must yield T (the type of the integrated value) when multiplied by
// f64. We could possibly replace f64 with a generic as well, but it would make things a bit more
// complex.
impl<S: Mul<f64, Output=T> + Float + Zero,
T:'static + Clone + Send + Float> Integrator<S, T> {
pub fn new(frequency: u32) -> Integrator<S, T> {
// We create a pipe allowing functions to be sent from tx (the sending end) to input (the
// receiving end). In order to change the function we are integrating from the task in
// which the Integrator lives, we simply send the function through tx.
let (tx, input) = channel();
// The easiest way to do shared-memory concurrency in Rust is to use atomic reference
// counting, or Arc, around a synchronized type (like Mutex<T>). Arc gives you a guarantee
// that memory will not be freed as long as there is at least one reference to it.
// It is similar to C++'s shared_ptr, but it is guaranteed to be safe and is never
// incremented unless explicitly cloned (by default, it is moved).
let s: Arc<Mutex<T>> = Arc::new(Mutex::new(Zero::zero()));
let integrator = Integrator {
input: tx,
// Here is the aforementioned clone. We have to do it before s enters the closure,
// because once that happens it is moved into the closure (and later, the new task) and
// becomes inaccessible to the outside world.
output: s.clone(),
};
spawn(move || -> () {
// The frequency is how often we want to "tick" as we update our integrated total. In
// Rust, timers can yield Receivers that are periodically notified with an empty
// message (where the period is the frequency). This is useful because it lets us wait
// on either a tick or another type of message (in this case, a request to change the
// function we are integrating).
let periodic = periodic_ms(frequency);
let mut t = 0;
let mut k: Box<Fn(u32) -> S + Send> = Box::new(|_| Zero::zero());
let mut k_0: S = Zero::zero();
loop {
// Here's the selection we talked about above. Note that we are careful to call
// the *non*-failing function, recv(), here. The reason we do this is because
// recv() will return Err when the sending end of a channel is dropped. While
// this is unlikely to happen for the timer (so again, you could argue for failure
// there), it's normal behavior for the sending end of input to be dropped, since
// it just happens when the Integrator falls out of scope. So we handle it cleanly
// and break out of the loop, rather than failing.
select! {
res = periodic.recv() => match res {
Ok(_) => {
t += frequency;
let k_1: S = k(t);
// Rust Mutexes are a bit different from Mutexes in many other
// languages, in that the protected data is actually encapsulated by
// the Mutex. The reason for this is that Rust is actually capable of
// enforcing (via its borrow checker) the invariant that the contents
// of a Mutex may only be read when you have acquired its lock. This
// is enforced by way of a MutexGuard, the return value of lock(),
// which implements some special traits (Deref and DerefMut) that allow
// access to the inner element "through" the guard. The element so
// acquired has a lifetime bounded by that of the MutexGuard, the
// MutexGuard can only be acquired by taking a lock, and the only way
// to release the lock is by letting the MutexGuard fall out of scope,
// so it's impossible to access the data incorrectly. There are some
// additional subtleties around the actual implementation, but that's
// the basic idea.
let mut s = s.lock().unwrap();
*s = *s + (k_1 + k_0) * (frequency as f64 / 2.);
k_0 = k_1;
}
Err(_) => break,
},
res = input.recv() => match res {
Ok(k_new) => k = k_new,
Err(_) => break,
}
}
}
});
integrator
}
pub fn input(&self, k: Box<Fn(u32) -> S + Send>) ->
Result<(), SendError<Box<Fn(u32) -> S + Send>>> {
// The meat of the work is done in the other thread, so to set the
// input we just send along the Sender we set earlier...
self.input.send(k)
}
pub fn output(&self) -> T {
//...and to read the input, we simply acquire a lock on the output Mutex and return a
// clone. Why do we have to clone it? Because, as mentioned above, Rust won't let us
// retain access to the interior of the Mutex unless we have possession of its lock. There
// are ways and circumstances in which one can avoid this (e.g. by using atomic types) but
// clone() is a perfectly reasonable solution as well, and a lot easier to reason about :)
self.output.lock().unwrap().clone()
}
}
// This function is fairly straightforward. We create the integrator, set its input function k(t)
// to 2pi * f * t, and then wait as described in the Rosetta stone problem.
#[cfg(not(test))]
fn | () -> f64 {
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / Duration::seconds(2).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(2000);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(500);
object.output()
}
#[cfg(not(test))]
fn main() {
println!("{}", integrate());
}
#[test]
//#[ignore] // Will fail on a heavily loaded machine
fn solution() {
// We should just be able to call integrate, but can't represent the closure properly due to
// rust-lang/rust issue #17060 if we make frequency or period a variable.
// FIXME(pythonesque): When unboxed closures are fixed, fix integrate() to take two arguments.
let object = Integrator::new(10);
object.input(Box::new(|t: u32| {
let f = 1. / (Duration::seconds(2) / 10).num_milliseconds() as f64;
(2. * PI * f * t as f64).sin()
})).ok().expect("Failed to set input");
thread::sleep_ms(200);
object.input(Box::new(|_| 0.)).ok().expect("Failed to set input");
thread::sleep_ms(100);
assert_eq!(object.output() as u32, 0)
}
| integrate | identifier_name |
tcp.rs | use traits::*;
use rotor::mio::tcp::TcpStream as MioTcpStream;
use rotor::mio::tcp::Shutdown;
use netbuf::Buf;
use std::io;
pub struct TcpStream {
stream: MioTcpStream,
read_buffer: Buf,
}
impl Transport for TcpStream {
type Buffer = MioTcpStream;
/// Returns a buffer object that will write data to the underlying socket. This is used by the
/// Codecs in order to efficiently write data without copying.
fn buffer(&mut self) -> &mut Self::Buffer {
&mut self.stream
}
fn spawned(&mut self) {
debug!("spawned tcp stream");
}
fn closed(&mut self, err: Option<&io::Error>) {
debug!("closing tcp stream");
debug!("transport close: optional error: {:?}", err);
self.stream.shutdown(Shutdown::Both).map_err(|e| {
error!("tcp transport: error closing: {}", e);
});
}
fn read(&mut self) -> io::Result<&[u8]> {
use std::io::ErrorKind::*;
let stream = &mut self.stream;
let buf = &mut self.read_buffer;
loop {
match buf.read_from(stream) {
Ok(_) => {},
Err(e) => {
match e.kind() {
WouldBlock => {
return Ok(&buf[..])
},
Interrupted => {},
_ => {
return Err(e)
},
}
},
}
}
}
/// Tells transport that "bytes" number of bytes have been read
fn consume(&mut self, bytes: usize) {
self.read_buffer.consume(bytes)
}
/// Called when socket changes state to being writable.
fn writable(&mut self) |
}
| {
debug!("writable tcp stream");
} | identifier_body |
tcp.rs | use traits::*;
use rotor::mio::tcp::TcpStream as MioTcpStream;
use rotor::mio::tcp::Shutdown;
use netbuf::Buf;
use std::io;
pub struct TcpStream {
stream: MioTcpStream,
read_buffer: Buf,
}
impl Transport for TcpStream {
type Buffer = MioTcpStream;
/// Returns a buffer object that will write data to the underlying socket. This is used by the
/// Codecs in order to efficiently write data without copying.
fn buffer(&mut self) -> &mut Self::Buffer {
&mut self.stream
}
fn spawned(&mut self) {
debug!("spawned tcp stream");
}
fn closed(&mut self, err: Option<&io::Error>) {
debug!("closing tcp stream");
debug!("transport close: optional error: {:?}", err);
self.stream.shutdown(Shutdown::Both).map_err(|e| {
error!("tcp transport: error closing: {}", e);
});
}
fn read(&mut self) -> io::Result<&[u8]> {
use std::io::ErrorKind::*;
let stream = &mut self.stream;
let buf = &mut self.read_buffer;
loop {
match buf.read_from(stream) {
Ok(_) => {},
Err(e) => {
match e.kind() {
WouldBlock => {
return Ok(&buf[..])
},
Interrupted => | ,
_ => {
return Err(e)
},
}
},
}
}
}
/// Tells transport that "bytes" number of bytes have been read
fn consume(&mut self, bytes: usize) {
self.read_buffer.consume(bytes)
}
/// Called when socket changes state to being writable.
fn writable(&mut self) {
debug!("writable tcp stream");
}
}
| {} | conditional_block |
tcp.rs | use traits::*;
use rotor::mio::tcp::TcpStream as MioTcpStream;
use rotor::mio::tcp::Shutdown;
use netbuf::Buf;
use std::io;
pub struct TcpStream {
stream: MioTcpStream,
read_buffer: Buf,
}
impl Transport for TcpStream {
type Buffer = MioTcpStream;
/// Returns a buffer object that will write data to the underlying socket. This is used by the
/// Codecs in order to efficiently write data without copying.
fn buffer(&mut self) -> &mut Self::Buffer {
&mut self.stream
}
fn spawned(&mut self) {
debug!("spawned tcp stream");
}
fn closed(&mut self, err: Option<&io::Error>) {
debug!("closing tcp stream");
debug!("transport close: optional error: {:?}", err);
self.stream.shutdown(Shutdown::Both).map_err(|e| {
error!("tcp transport: error closing: {}", e);
});
}
| fn read(&mut self) -> io::Result<&[u8]> {
use std::io::ErrorKind::*;
let stream = &mut self.stream;
let buf = &mut self.read_buffer;
loop {
match buf.read_from(stream) {
Ok(_) => {},
Err(e) => {
match e.kind() {
WouldBlock => {
return Ok(&buf[..])
},
Interrupted => {},
_ => {
return Err(e)
},
}
},
}
}
}
/// Tells transport that "bytes" number of bytes have been read
fn consume(&mut self, bytes: usize) {
self.read_buffer.consume(bytes)
}
/// Called when socket changes state to being writable.
fn writable(&mut self) {
debug!("writable tcp stream");
}
} | random_line_split |
|
tcp.rs | use traits::*;
use rotor::mio::tcp::TcpStream as MioTcpStream;
use rotor::mio::tcp::Shutdown;
use netbuf::Buf;
use std::io;
pub struct | {
stream: MioTcpStream,
read_buffer: Buf,
}
impl Transport for TcpStream {
type Buffer = MioTcpStream;
/// Returns a buffer object that will write data to the underlying socket. This is used by the
/// Codecs in order to efficiently write data without copying.
fn buffer(&mut self) -> &mut Self::Buffer {
&mut self.stream
}
fn spawned(&mut self) {
debug!("spawned tcp stream");
}
fn closed(&mut self, err: Option<&io::Error>) {
debug!("closing tcp stream");
debug!("transport close: optional error: {:?}", err);
self.stream.shutdown(Shutdown::Both).map_err(|e| {
error!("tcp transport: error closing: {}", e);
});
}
fn read(&mut self) -> io::Result<&[u8]> {
use std::io::ErrorKind::*;
let stream = &mut self.stream;
let buf = &mut self.read_buffer;
loop {
match buf.read_from(stream) {
Ok(_) => {},
Err(e) => {
match e.kind() {
WouldBlock => {
return Ok(&buf[..])
},
Interrupted => {},
_ => {
return Err(e)
},
}
},
}
}
}
/// Tells transport that "bytes" number of bytes have been read
fn consume(&mut self, bytes: usize) {
self.read_buffer.consume(bytes)
}
/// Called when socket changes state to being writable.
fn writable(&mut self) {
debug!("writable tcp stream");
}
}
| TcpStream | identifier_name |
persistent_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A persistent, thread-safe singly-linked list.
use std::sync::Arc;
pub struct PersistentList<T> {
head: PersistentListLink<T>,
length: usize,
}
struct PersistentListEntry<T> {
value: T,
next: PersistentListLink<T>,
}
type PersistentListLink<T> = Option<Arc<PersistentListEntry<T>>>;
impl<T> PersistentList<T> where T: Send + Sync {
#[inline]
pub fn new() -> PersistentList<T> {
PersistentList {
head: None,
length: 0,
}
}
#[inline]
pub fn len(&self) -> usize {
self.length
}
#[inline]
pub fn front(&self) -> Option<&T> {
self.head.as_ref().map(|head| &head.value)
}
#[inline]
pub fn prepend_elem(&self, value: T) -> PersistentList<T> {
PersistentList {
head: Some(Arc::new(PersistentListEntry {
value: value,
next: self.head.clone(),
})),
length: self.length + 1,
}
}
#[inline]
pub fn iter(&self) -> PersistentListIterator<T> {
// This could clone (and would not need the lifetime if it did), but then it would incur
// atomic operations on every call to `.next()`. Bad.
PersistentListIterator {
entry: self.head.as_ref().map(|head| &**head),
}
}
}
impl<T> Clone for PersistentList<T> where T: Send + Sync {
fn clone(&self) -> PersistentList<T> { | }
}
}
pub struct PersistentListIterator<'a, T> where T: 'a + Send + Sync {
entry: Option<&'a PersistentListEntry<T>>,
}
impl<'a, T> Iterator for PersistentListIterator<'a, T> where T: Send + Sync +'static {
type Item = &'a T;
#[inline]
fn next(&mut self) -> Option<&'a T> {
let entry = self.entry?;
let value = &entry.value;
self.entry = match entry.next {
None => None,
Some(ref entry) => Some(&**entry),
};
Some(value)
}
} | // This establishes the persistent nature of this list: we can clone a list by just cloning
// its head.
PersistentList {
head: self.head.clone(),
length: self.length, | random_line_split |
persistent_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A persistent, thread-safe singly-linked list.
use std::sync::Arc;
pub struct PersistentList<T> {
head: PersistentListLink<T>,
length: usize,
}
struct PersistentListEntry<T> {
value: T,
next: PersistentListLink<T>,
}
type PersistentListLink<T> = Option<Arc<PersistentListEntry<T>>>;
impl<T> PersistentList<T> where T: Send + Sync {
#[inline]
pub fn new() -> PersistentList<T> {
PersistentList {
head: None,
length: 0,
}
}
#[inline]
pub fn len(&self) -> usize {
self.length
}
#[inline]
pub fn front(&self) -> Option<&T> {
self.head.as_ref().map(|head| &head.value)
}
#[inline]
pub fn prepend_elem(&self, value: T) -> PersistentList<T> {
PersistentList {
head: Some(Arc::new(PersistentListEntry {
value: value,
next: self.head.clone(),
})),
length: self.length + 1,
}
}
#[inline]
pub fn | (&self) -> PersistentListIterator<T> {
// This could clone (and would not need the lifetime if it did), but then it would incur
// atomic operations on every call to `.next()`. Bad.
PersistentListIterator {
entry: self.head.as_ref().map(|head| &**head),
}
}
}
impl<T> Clone for PersistentList<T> where T: Send + Sync {
fn clone(&self) -> PersistentList<T> {
// This establishes the persistent nature of this list: we can clone a list by just cloning
// its head.
PersistentList {
head: self.head.clone(),
length: self.length,
}
}
}
pub struct PersistentListIterator<'a, T> where T: 'a + Send + Sync {
entry: Option<&'a PersistentListEntry<T>>,
}
impl<'a, T> Iterator for PersistentListIterator<'a, T> where T: Send + Sync +'static {
type Item = &'a T;
#[inline]
fn next(&mut self) -> Option<&'a T> {
let entry = self.entry?;
let value = &entry.value;
self.entry = match entry.next {
None => None,
Some(ref entry) => Some(&**entry),
};
Some(value)
}
}
| iter | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![feature(custom_derive, plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
extern crate app_units;
extern crate canvas_traits;
extern crate devtools_traits;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate libc;
extern crate msg;
extern crate net_traits;
extern crate offscreen_gl_context;
extern crate profile_traits;
extern crate serde;
extern crate style_traits;
extern crate time;
extern crate url;
extern crate util;
mod script_msg;
use app_units::Au;
use devtools_traits::ScriptToDevtoolsControlMsg;
use euclid::Size2D;
use euclid::length::Length;
use euclid::point::Point2D;
use euclid::rect::Rect;
use gfx_traits::Epoch;
use gfx_traits::LayerId;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::c_void;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineId, WindowSizeData};
use msg::constellation_msg::{Key, KeyModifiers, KeyState, LoadData};
use msg::constellation_msg::{PipelineNamespaceId, SubpageId};
use msg::webdriver_msg::WebDriverScriptCommand;
use net_traits::ResourceThread;
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageThread;
use profile_traits::mem;
use std::any::Any;
use url::Url;
use util::ipc::OptionalOpaqueIpcSender;
pub use script_msg::{LayoutMsg, ScriptMsg};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug)]
pub struct UntrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout thread exit.
ExitNow,
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Informs layout as to which regions of the page are visible.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// The initial data associated with a newly-created framed pipeline.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
/// Id of the parent of this new pipeline.
pub containing_pipeline_id: PipelineId,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the new frame associated with this pipeline.
pub subpage_id: SubpageId,
/// Network request data which will be initiated by the script thread.
pub load_data: LoadData,
/// The paint channel, cast to `OptionalOpaqueIpcSender`. This is really an
/// `Sender<LayoutToPaintMsg>`.
pub paint_chan: OptionalOpaqueIpcSender,
/// Information on what to do on thread failure.
pub failure: Failure,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can notify others when it's done.
pub layout_shutdown_chan: IpcSender<()>,
/// A shutdown channel so that layout can tell the content process to shut down when it's done.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Messages sent from the constellation or layout to the script thread.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events.
Resize(PipelineId, WindowSizeData),
/// Notifies script that window has been resized but to not take immediate action.
ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId),
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Requests that the script thread immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script thread to suspend all its timers
Freeze(PipelineId),
/// Notifies script thread to resume all its timers
Thaw(PipelineId),
/// Notifies script thread that a url should be loaded in this iframe.
Navigate(PipelineId, SubpageId, LoadData),
/// Requests the script thread forward a mozbrowser event to an iframe it owns
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// Updates the current subpage id of a given iframe
UpdateSubpageId(PipelineId, SubpageId, SubpageId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
FocusIFrame(PipelineId, SubpageId),
/// Passes a webdriver command to the script thread for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script thread that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script thread that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Cause a `load` event to be dispatched at the appropriate frame element.
DispatchFrameLoadEvent {
/// The pipeline that has been marked as loaded.
target: PipelineId,
/// The pipeline that contains a frame loading the target pipeline.
parent: PipelineId,
},
/// Notifies a parent frame that one of its child frames is now active.
FramedContentChanged(PipelineId, SubpageId),
/// Report an error from a CSS parser for the given pipeline
ReportCSSError(PipelineId, String, usize, usize, String),
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
/// The document has been loaded and is idle.
Idle,
/// The document is either loading or waiting on an event.
Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
/// Animations are active but no callbacks are queued
AnimationsPresent,
/// Animations are active and callbacks are queued
AnimationCallbacksPresent,
/// No animations are active and no callbacks are queued
NoAnimationsPresent,
/// No animations are active but callbacks are queued
NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
/// A new touch point came in contact with the screen.
Down,
/// An existing touch point changed location.
Move,
/// A touch point was removed from the screen.
Up,
/// The system stopped tracking a touch point.
Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
/// The left mouse button.
Left,
/// The middle mouse button.
Middle,
/// The right mouse button.
Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum MouseEventType {
/// Mouse button clicked
Click,
/// Mouse button down
MouseDown,
/// Mouse button up
MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
/// The window was resized.
ResizeEvent(WindowSizeData),
/// A mouse button state changed.
MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
/// The mouse was moved over a point (or was moved out of the recognizable region).
MouseMoveEvent(Option<Point2D<f32>>),
/// A touch event was generated with a touch ID and location.
TouchEvent(TouchEventType, TouchId, Point2D<f32>),
/// Touchpad pressure event
TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
/// A key was pressed.
KeyEvent(Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for TouchpadPressureEvent.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
/// Pressure before a regular click.
BeforeClick,
/// Pressure after a regular click.
AfterFirstClick,
/// Pressure after a "forceTouch" click
AfterSecondClick,
}
/// An opaque wrapper around script<->layout channels to avoid leaking message types into
/// crates that don't need to know about them.
pub struct OpaqueScriptLayoutChannel(pub (Box<Any + Send>, Box<Any + Send>));
/// Requests a TimerEvent-Message be sent after the given duration.
#[derive(Deserialize, Serialize)]
pub struct TimerEventRequest(pub IpcSender<TimerEvent>,
pub TimerSource,
pub TimerEventId,
pub MsDuration);
/// Notifies the script thread to fire due timers.
/// TimerSource must be FromWindow when dispatched to ScriptThread and
/// must be FromWorker when dispatched to a DedicatedGlobalWorkerScope
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
/// The event was requested from a window (ScriptThread).
FromWindow(PipelineId),
/// The event was requested from a worker (DedicatedGlobalWorkerScope).
FromWorker
}
/// The id to be used for a TimerEvent is defined by the corresponding TimerEventRequest.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<Milliseconds, u64>;
/// Amount of nanoseconds.
pub type NsDuration = Length<Nanoseconds, u64>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
Length::new(time::precise_time_ns() / (1000 * 1000))
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration |
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
/// The ID of the pipeline with which this script thread is associated.
pub id: PipelineId,
/// The subpage ID of this pipeline to create in its pipeline parent.
/// If `None`, this is the root.
pub parent_info: Option<(PipelineId, SubpageId)>,
/// The compositor.
pub compositor: IpcSender<ScriptToCompositorMsg>,
/// A channel with which messages can be sent to us (the script thread).
pub control_chan: IpcSender<ConstellationControlMsg>,
/// A port on which messages sent by the constellation to script can be received.
pub control_port: IpcReceiver<ConstellationControlMsg>,
/// A channel on which messages can be sent to the constellation from script.
pub constellation_chan: ConstellationChan<ScriptMsg>,
/// A channel for the layout thread to send messages to the constellation.
pub layout_to_constellation_chan: ConstellationChan<LayoutMsg>,
/// A channel to schedule timer events.
pub scheduler_chan: IpcSender<TimerEventRequest>,
/// Information that script sends out when it panics.
pub failure_info: Failure,
/// A channel to the resource manager thread.
pub resource_thread: ResourceThread,
/// A channel to the storage thread.
pub storage_thread: StorageThread,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// A ping will be sent on this channel once the script thread shuts down.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Encapsulates external communication with the script thread.
#[derive(Clone, Deserialize, Serialize)]
pub struct ScriptControlChan(pub IpcSender<ConstellationControlMsg>);
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
/// Create a `ScriptThread`.
fn create(_phantom: Option<&mut Self>,
state: InitialScriptState,
layout_chan: &OpaqueScriptLayoutChannel,
load_data: LoadData);
/// Create a script -> layout channel (`Sender`, `Receiver` pair).
fn create_layout_channel(_phantom: Option<&mut Self>) -> OpaqueScriptLayoutChannel;
/// Clone the `Sender` in `pair`.
fn clone_layout_channel(_phantom: Option<&mut Self>, pair: &OpaqueScriptLayoutChannel)
-> Box<Any + Send>;
}
/// Messages sent from the script thread to the compositor
#[derive(Deserialize, Serialize)]
pub enum ScriptToCompositorMsg {
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Set title of current page
/// https://html.spec.whatwg.org/multipage/#document.title
SetTitle(PipelineId, Option<String>),
/// Send a key event
SendKeyEvent(Key, KeyState, KeyModifiers),
/// Get Window Informations size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
/// Resize the window to size
ResizeTo(Size2D<u32>),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Requests that the compositor shut down.
Exit,
/// Allow the compositor to free script-specific resources.
Exited,
}
/// Whether a DOM event was prevented by web content
#[derive(Deserialize, Serialize)]
pub enum EventResult {
/// Allowed by web content
DefaultAllowed,
/// Prevented by web content
DefaultPrevented,
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
/// Sandbox attribute is present
IFrameSandboxed,
/// Sandbox attribute is not present
IFrameUnsandboxed
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
/// Url to load
pub url: Option<Url>,
/// Pipeline ID of the parent of this iframe
pub containing_pipeline_id: PipelineId,
/// The new subpage ID for this load
pub new_subpage_id: SubpageId,
/// The old subpage ID for this iframe, if a page was previously loaded.
pub old_subpage_id: Option<SubpageId>,
/// The new pipeline ID that the iframe has generated.
pub new_pipeline_id: PipelineId,
/// Sandbox type of this iframe
pub sandbox: IFrameSandboxState,
/// Whether this iframe should be considered private
pub is_private: bool,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
/// Sent when the scroll position within a browser `<iframe>` changes.
AsyncScroll,
/// Sent when window.close() is called within a browser `<iframe>`.
Close,
/// Sent when a browser `<iframe>` tries to open a context menu. This allows
/// handling `<menuitem>` element available within the browser `<iframe>`'s content.
ContextMenu,
/// Sent when an error occurred while trying to load content within a browser `<iframe>`.
Error,
/// Sent when the favicon of a browser `<iframe>` changes.
IconChange(String, String, String),
/// Sent when the browser `<iframe>` has reached the server.
Connected,
/// Sent when the browser `<iframe>` has finished loading all its assets.
LoadEnd,
/// Sent when the browser `<iframe>` starts to load a new page.
LoadStart,
/// Sent when a browser `<iframe>`'s location changes.
LocationChange(String, bool, bool),
/// Sent when window.open() is called within a browser `<iframe>`.
OpenWindow,
/// Sent when the SSL state changes within a browser `<iframe>`.
SecurityChange(HttpsState),
/// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
/// Sent when the document.title changes within a browser `<iframe>`.
TitleChange(String),
/// Sent when an HTTP authentification is requested.
UsernameAndPasswordRequired,
/// Sent when a link to a search engine is found.
OpenSearch,
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenWindow => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch"
}
}
}
| {
Length::new(time::precise_time_ns())
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![feature(custom_derive, plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
extern crate app_units;
extern crate canvas_traits;
extern crate devtools_traits;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate libc;
extern crate msg;
extern crate net_traits;
extern crate offscreen_gl_context;
extern crate profile_traits;
extern crate serde;
extern crate style_traits;
extern crate time;
extern crate url;
extern crate util;
mod script_msg;
use app_units::Au;
use devtools_traits::ScriptToDevtoolsControlMsg;
use euclid::Size2D;
use euclid::length::Length;
use euclid::point::Point2D;
use euclid::rect::Rect;
use gfx_traits::Epoch;
use gfx_traits::LayerId;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::c_void;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineId, WindowSizeData};
use msg::constellation_msg::{Key, KeyModifiers, KeyState, LoadData};
use msg::constellation_msg::{PipelineNamespaceId, SubpageId};
use msg::webdriver_msg::WebDriverScriptCommand;
use net_traits::ResourceThread;
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageThread;
use profile_traits::mem;
use std::any::Any;
use url::Url;
use util::ipc::OptionalOpaqueIpcSender;
pub use script_msg::{LayoutMsg, ScriptMsg};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug)]
pub struct UntrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout thread exit.
ExitNow,
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Informs layout as to which regions of the page are visible.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// The initial data associated with a newly-created framed pipeline.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
/// Id of the parent of this new pipeline.
pub containing_pipeline_id: PipelineId,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the new frame associated with this pipeline.
pub subpage_id: SubpageId,
/// Network request data which will be initiated by the script thread.
pub load_data: LoadData,
/// The paint channel, cast to `OptionalOpaqueIpcSender`. This is really an
/// `Sender<LayoutToPaintMsg>`.
pub paint_chan: OptionalOpaqueIpcSender,
/// Information on what to do on thread failure.
pub failure: Failure,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can notify others when it's done.
pub layout_shutdown_chan: IpcSender<()>,
/// A shutdown channel so that layout can tell the content process to shut down when it's done.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Messages sent from the constellation or layout to the script thread.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events.
Resize(PipelineId, WindowSizeData),
/// Notifies script that window has been resized but to not take immediate action.
ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId),
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Requests that the script thread immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script thread to suspend all its timers
Freeze(PipelineId),
/// Notifies script thread to resume all its timers
Thaw(PipelineId),
/// Notifies script thread that a url should be loaded in this iframe.
Navigate(PipelineId, SubpageId, LoadData),
/// Requests the script thread forward a mozbrowser event to an iframe it owns
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// Updates the current subpage id of a given iframe
UpdateSubpageId(PipelineId, SubpageId, SubpageId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
FocusIFrame(PipelineId, SubpageId),
/// Passes a webdriver command to the script thread for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script thread that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script thread that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Cause a `load` event to be dispatched at the appropriate frame element.
DispatchFrameLoadEvent {
/// The pipeline that has been marked as loaded.
target: PipelineId,
/// The pipeline that contains a frame loading the target pipeline.
parent: PipelineId,
},
/// Notifies a parent frame that one of its child frames is now active.
FramedContentChanged(PipelineId, SubpageId),
/// Report an error from a CSS parser for the given pipeline
ReportCSSError(PipelineId, String, usize, usize, String),
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
/// The document has been loaded and is idle.
Idle,
/// The document is either loading or waiting on an event.
Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
/// Animations are active but no callbacks are queued
AnimationsPresent,
/// Animations are active and callbacks are queued
AnimationCallbacksPresent,
/// No animations are active and no callbacks are queued
NoAnimationsPresent,
/// No animations are active but callbacks are queued
NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
/// A new touch point came in contact with the screen.
Down,
/// An existing touch point changed location.
Move,
/// A touch point was removed from the screen.
Up,
/// The system stopped tracking a touch point.
Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
/// The left mouse button.
Left,
/// The middle mouse button.
Middle,
/// The right mouse button.
Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum MouseEventType {
/// Mouse button clicked
Click,
/// Mouse button down
MouseDown,
/// Mouse button up
MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
/// The window was resized.
ResizeEvent(WindowSizeData),
/// A mouse button state changed.
MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
/// The mouse was moved over a point (or was moved out of the recognizable region).
MouseMoveEvent(Option<Point2D<f32>>),
/// A touch event was generated with a touch ID and location.
TouchEvent(TouchEventType, TouchId, Point2D<f32>),
/// Touchpad pressure event
TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
/// A key was pressed.
KeyEvent(Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for TouchpadPressureEvent.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
/// Pressure before a regular click.
BeforeClick,
/// Pressure after a regular click.
AfterFirstClick,
/// Pressure after a "forceTouch" click
AfterSecondClick,
}
/// An opaque wrapper around script<->layout channels to avoid leaking message types into
/// crates that don't need to know about them.
pub struct OpaqueScriptLayoutChannel(pub (Box<Any + Send>, Box<Any + Send>));
/// Requests a TimerEvent-Message be sent after the given duration.
#[derive(Deserialize, Serialize)]
pub struct | (pub IpcSender<TimerEvent>,
pub TimerSource,
pub TimerEventId,
pub MsDuration);
/// Notifies the script thread to fire due timers.
/// TimerSource must be FromWindow when dispatched to ScriptThread and
/// must be FromWorker when dispatched to a DedicatedGlobalWorkerScope
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
/// The event was requested from a window (ScriptThread).
FromWindow(PipelineId),
/// The event was requested from a worker (DedicatedGlobalWorkerScope).
FromWorker
}
/// The id to be used for a TimerEvent is defined by the corresponding TimerEventRequest.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<Milliseconds, u64>;
/// Amount of nanoseconds.
pub type NsDuration = Length<Nanoseconds, u64>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
Length::new(time::precise_time_ns() / (1000 * 1000))
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
Length::new(time::precise_time_ns())
}
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
/// The ID of the pipeline with which this script thread is associated.
pub id: PipelineId,
/// The subpage ID of this pipeline to create in its pipeline parent.
/// If `None`, this is the root.
pub parent_info: Option<(PipelineId, SubpageId)>,
/// The compositor.
pub compositor: IpcSender<ScriptToCompositorMsg>,
/// A channel with which messages can be sent to us (the script thread).
pub control_chan: IpcSender<ConstellationControlMsg>,
/// A port on which messages sent by the constellation to script can be received.
pub control_port: IpcReceiver<ConstellationControlMsg>,
/// A channel on which messages can be sent to the constellation from script.
pub constellation_chan: ConstellationChan<ScriptMsg>,
/// A channel for the layout thread to send messages to the constellation.
pub layout_to_constellation_chan: ConstellationChan<LayoutMsg>,
/// A channel to schedule timer events.
pub scheduler_chan: IpcSender<TimerEventRequest>,
/// Information that script sends out when it panics.
pub failure_info: Failure,
/// A channel to the resource manager thread.
pub resource_thread: ResourceThread,
/// A channel to the storage thread.
pub storage_thread: StorageThread,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// A ping will be sent on this channel once the script thread shuts down.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Encapsulates external communication with the script thread.
#[derive(Clone, Deserialize, Serialize)]
pub struct ScriptControlChan(pub IpcSender<ConstellationControlMsg>);
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
/// Create a `ScriptThread`.
fn create(_phantom: Option<&mut Self>,
state: InitialScriptState,
layout_chan: &OpaqueScriptLayoutChannel,
load_data: LoadData);
/// Create a script -> layout channel (`Sender`, `Receiver` pair).
fn create_layout_channel(_phantom: Option<&mut Self>) -> OpaqueScriptLayoutChannel;
/// Clone the `Sender` in `pair`.
fn clone_layout_channel(_phantom: Option<&mut Self>, pair: &OpaqueScriptLayoutChannel)
-> Box<Any + Send>;
}
/// Messages sent from the script thread to the compositor
#[derive(Deserialize, Serialize)]
pub enum ScriptToCompositorMsg {
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Set title of current page
/// https://html.spec.whatwg.org/multipage/#document.title
SetTitle(PipelineId, Option<String>),
/// Send a key event
SendKeyEvent(Key, KeyState, KeyModifiers),
/// Get Window Informations size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
/// Resize the window to size
ResizeTo(Size2D<u32>),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Requests that the compositor shut down.
Exit,
/// Allow the compositor to free script-specific resources.
Exited,
}
/// Whether a DOM event was prevented by web content
#[derive(Deserialize, Serialize)]
pub enum EventResult {
/// Allowed by web content
DefaultAllowed,
/// Prevented by web content
DefaultPrevented,
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
/// Sandbox attribute is present
IFrameSandboxed,
/// Sandbox attribute is not present
IFrameUnsandboxed
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
/// Url to load
pub url: Option<Url>,
/// Pipeline ID of the parent of this iframe
pub containing_pipeline_id: PipelineId,
/// The new subpage ID for this load
pub new_subpage_id: SubpageId,
/// The old subpage ID for this iframe, if a page was previously loaded.
pub old_subpage_id: Option<SubpageId>,
/// The new pipeline ID that the iframe has generated.
pub new_pipeline_id: PipelineId,
/// Sandbox type of this iframe
pub sandbox: IFrameSandboxState,
/// Whether this iframe should be considered private
pub is_private: bool,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
/// Sent when the scroll position within a browser `<iframe>` changes.
AsyncScroll,
/// Sent when window.close() is called within a browser `<iframe>`.
Close,
/// Sent when a browser `<iframe>` tries to open a context menu. This allows
/// handling `<menuitem>` element available within the browser `<iframe>`'s content.
ContextMenu,
/// Sent when an error occurred while trying to load content within a browser `<iframe>`.
Error,
/// Sent when the favicon of a browser `<iframe>` changes.
IconChange(String, String, String),
/// Sent when the browser `<iframe>` has reached the server.
Connected,
/// Sent when the browser `<iframe>` has finished loading all its assets.
LoadEnd,
/// Sent when the browser `<iframe>` starts to load a new page.
LoadStart,
/// Sent when a browser `<iframe>`'s location changes.
LocationChange(String, bool, bool),
/// Sent when window.open() is called within a browser `<iframe>`.
OpenWindow,
/// Sent when the SSL state changes within a browser `<iframe>`.
SecurityChange(HttpsState),
/// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
/// Sent when the document.title changes within a browser `<iframe>`.
TitleChange(String),
/// Sent when an HTTP authentification is requested.
UsernameAndPasswordRequired,
/// Sent when a link to a search engine is found.
OpenSearch,
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenWindow => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch"
}
}
}
| TimerEventRequest | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![feature(custom_derive, plugin)]
#![plugin(heapsize_plugin, plugins, serde_macros)]
#![deny(missing_docs)]
#![deny(unsafe_code)]
extern crate app_units;
extern crate canvas_traits;
extern crate devtools_traits;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate libc;
extern crate msg;
extern crate net_traits;
extern crate offscreen_gl_context;
extern crate profile_traits;
extern crate serde;
extern crate style_traits;
extern crate time;
extern crate url;
extern crate util;
mod script_msg;
use app_units::Au;
use devtools_traits::ScriptToDevtoolsControlMsg;
use euclid::Size2D;
use euclid::length::Length;
use euclid::point::Point2D;
use euclid::rect::Rect;
use gfx_traits::Epoch;
use gfx_traits::LayerId;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::c_void;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineId, WindowSizeData};
use msg::constellation_msg::{Key, KeyModifiers, KeyState, LoadData};
use msg::constellation_msg::{PipelineNamespaceId, SubpageId};
use msg::webdriver_msg::WebDriverScriptCommand;
use net_traits::ResourceThread;
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageThread;
use profile_traits::mem;
use std::any::Any;
use url::Url;
use util::ipc::OptionalOpaqueIpcSender;
pub use script_msg::{LayoutMsg, ScriptMsg};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug)]
pub struct UntrustedNodeAddress(pub *const c_void);
// SAFETY(review): the raw pointer is carried only as an opaque token and, per
// the doc comment above, must be validated before use; presumably no consumer
// dereferences it directly, which would make sending it across threads sound.
// TODO confirm against `from_untrusted_node_address` callers.
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout thread exit.
ExitNow,
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Informs layout as to which regions of the page are visible.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// The initial data associated with a newly-created framed pipeline.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
/// Id of the parent of this new pipeline.
pub containing_pipeline_id: PipelineId,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the new frame associated with this pipeline.
pub subpage_id: SubpageId,
/// Network request data which will be initiated by the script thread.
pub load_data: LoadData,
/// The paint channel, cast to `OptionalOpaqueIpcSender`. This is really an
/// `Sender<LayoutToPaintMsg>`.
pub paint_chan: OptionalOpaqueIpcSender,
/// Information on what to do on thread failure.
pub failure: Failure,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can notify others when it's done.
pub layout_shutdown_chan: IpcSender<()>,
/// A shutdown channel so that layout can tell the content process to shut down when it's done.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Messages sent from the constellation or layout to the script thread.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events.
Resize(PipelineId, WindowSizeData),
/// Notifies script that window has been resized but to not take immediate action.
ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId),
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Requests that the script thread immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script thread to suspend all its timers
Freeze(PipelineId),
/// Notifies script thread to resume all its timers
Thaw(PipelineId),
/// Notifies script thread that a url should be loaded in this iframe.
Navigate(PipelineId, SubpageId, LoadData),
/// Requests the script thread forward a mozbrowser event to an iframe it owns
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// Updates the current subpage id of a given iframe
UpdateSubpageId(PipelineId, SubpageId, SubpageId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
FocusIFrame(PipelineId, SubpageId),
/// Passes a webdriver command to the script thread for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script thread that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script thread that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Cause a `load` event to be dispatched at the appropriate frame element.
DispatchFrameLoadEvent {
/// The pipeline that has been marked as loaded.
target: PipelineId,
/// The pipeline that contains a frame loading the target pipeline.
parent: PipelineId,
},
/// Notifies a parent frame that one of its child frames is now active.
FramedContentChanged(PipelineId, SubpageId),
/// Report an error from a CSS parser for the given pipeline
ReportCSSError(PipelineId, String, usize, usize, String),
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
/// The document has been loaded and is idle.
Idle,
/// The document is either loading or waiting on an event.
Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
/// Animations are active but no callbacks are queued
AnimationsPresent,
/// Animations are active and callbacks are queued
AnimationCallbacksPresent,
/// No animations are active and no callbacks are queued
NoAnimationsPresent,
/// No animations are active but callbacks are queued
NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
/// A new touch point came in contact with the screen.
Down,
/// An existing touch point changed location.
Move,
/// A touch point was removed from the screen.
Up,
/// The system stopped tracking a touch point.
Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
/// The left mouse button.
Left,
/// The middle mouse button.
Middle,
/// The right mouse button.
Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum MouseEventType {
/// Mouse button clicked
Click,
/// Mouse button down
MouseDown,
/// Mouse button up
MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
/// The window was resized.
ResizeEvent(WindowSizeData),
/// A mouse button state changed.
MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
/// The mouse was moved over a point (or was moved out of the recognizable region).
MouseMoveEvent(Option<Point2D<f32>>),
/// A touch event was generated with a touch ID and location.
TouchEvent(TouchEventType, TouchId, Point2D<f32>),
/// Touchpad pressure event
TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
/// A key was pressed.
KeyEvent(Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for TouchpadPressureEvent.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
/// Pressure before a regular click.
BeforeClick,
/// Pressure after a regular click.
AfterFirstClick,
/// Pressure after a "forceTouch" click
AfterSecondClick,
}
/// An opaque wrapper around script<->layout channels to avoid leaking message types into
/// crates that don't need to know about them.
pub struct OpaqueScriptLayoutChannel(pub (Box<Any + Send>, Box<Any + Send>));
/// Requests a TimerEvent-Message be sent after the given duration.
#[derive(Deserialize, Serialize)]
pub struct TimerEventRequest(pub IpcSender<TimerEvent>, // channel the event is delivered on
                             pub TimerSource,           // which thread kind requested it
                             pub TimerEventId,          // id echoed back inside the TimerEvent
                             pub MsDuration);           // requested delay in milliseconds
/// Notifies the script thread to fire due timers.
/// TimerSource must be FromWindow when dispatched to ScriptThread and
/// must be FromWorker when dispatched to a DedicatedGlobalWorkerScope
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
/// The event was requested from a window (ScriptThread).
FromWindow(PipelineId),
/// The event was requested from a worker (DedicatedGlobalWorkerScope).
FromWorker
}
/// The id to be used for a TimerEvent is defined by the corresponding TimerEventRequest.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<Milliseconds, u64>;
/// Amount of nanoseconds.
pub type NsDuration = Length<Nanoseconds, u64>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
    // The `time` crate reports nanoseconds; scale down to milliseconds.
    const NANOS_PER_MS: u64 = 1_000_000;
    Length::new(time::precise_time_ns() / NANOS_PER_MS)
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
    let nanos = time::precise_time_ns();
    Length::new(nanos)
}
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
/// The ID of the pipeline with which this script thread is associated.
pub id: PipelineId,
/// The subpage ID of this pipeline to create in its pipeline parent.
/// If `None`, this is the root.
pub parent_info: Option<(PipelineId, SubpageId)>,
/// The compositor.
pub compositor: IpcSender<ScriptToCompositorMsg>,
/// A channel with which messages can be sent to us (the script thread).
pub control_chan: IpcSender<ConstellationControlMsg>,
/// A port on which messages sent by the constellation to script can be received.
pub control_port: IpcReceiver<ConstellationControlMsg>,
/// A channel on which messages can be sent to the constellation from script.
pub constellation_chan: ConstellationChan<ScriptMsg>,
/// A channel for the layout thread to send messages to the constellation.
pub layout_to_constellation_chan: ConstellationChan<LayoutMsg>,
/// A channel to schedule timer events.
pub scheduler_chan: IpcSender<TimerEventRequest>,
/// Information that script sends out when it panics.
pub failure_info: Failure,
/// A channel to the resource manager thread.
pub resource_thread: ResourceThread,
/// A channel to the storage thread.
pub storage_thread: StorageThread,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// A ping will be sent on this channel once the script thread shuts down.
pub content_process_shutdown_chan: IpcSender<()>,
}
/// Encapsulates external communication with the script thread.
#[derive(Clone, Deserialize, Serialize)]
pub struct ScriptControlChan(pub IpcSender<ConstellationControlMsg>);
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
    /// Create a `ScriptThread`.
    ///
    /// `_phantom` only selects the implementing type; it is never read.
    fn create(_phantom: Option<&mut Self>,
              state: InitialScriptState,
              layout_chan: &OpaqueScriptLayoutChannel,
              load_data: LoadData);
    /// Create a script -> layout channel (`Sender`, `Receiver` pair).
    fn create_layout_channel(_phantom: Option<&mut Self>) -> OpaqueScriptLayoutChannel;
    /// Clone the `Sender` half of the channel in `pair`.
    fn clone_layout_channel(_phantom: Option<&mut Self>, pair: &OpaqueScriptLayoutChannel)
                            -> Box<Any + Send>;
}
/// Messages sent from the script thread to the compositor
#[derive(Deserialize, Serialize)]
pub enum ScriptToCompositorMsg {
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Set title of current page
/// https://html.spec.whatwg.org/multipage/#document.title
SetTitle(PipelineId, Option<String>),
/// Send a key event
SendKeyEvent(Key, KeyState, KeyModifiers),
/// Get Window Informations size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
/// Resize the window to size
ResizeTo(Size2D<u32>),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Requests that the compositor shut down.
Exit,
/// Allow the compositor to free script-specific resources.
Exited,
}
/// Whether a DOM event was prevented by web content
#[derive(Deserialize, Serialize)]
pub enum EventResult {
/// Allowed by web content
DefaultAllowed,
/// Prevented by web content
DefaultPrevented,
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
/// Sandbox attribute is present
IFrameSandboxed,
/// Sandbox attribute is not present
IFrameUnsandboxed
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
/// Url to load
pub url: Option<Url>,
/// Pipeline ID of the parent of this iframe
pub containing_pipeline_id: PipelineId,
/// The new subpage ID for this load
pub new_subpage_id: SubpageId,
/// The old subpage ID for this iframe, if a page was previously loaded.
pub old_subpage_id: Option<SubpageId>,
/// The new pipeline ID that the iframe has generated.
pub new_pipeline_id: PipelineId,
/// Sandbox type of this iframe
pub sandbox: IFrameSandboxState,
/// Whether this iframe should be considered private
pub is_private: bool,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
    /// Sent when the scroll position within a browser `<iframe>` changes.
    AsyncScroll,
    /// Sent when window.close() is called within a browser `<iframe>`.
    Close,
    /// Sent when a browser `<iframe>` tries to open a context menu. This allows
    /// handling `<menuitem>` element available within the browser `<iframe>`'s content.
    ContextMenu,
    /// Sent when an error occurred while trying to load content within a browser `<iframe>`.
    Error,
    /// Sent when the favicon of a browser `<iframe>` changes.
    IconChange(String, String, String),
    /// Sent when the browser `<iframe>` has reached the server.
    Connected,
    /// Sent when the browser `<iframe>` has finished loading all its assets.
    LoadEnd,
    /// Sent when the browser `<iframe>` starts to load a new page.
    LoadStart,
    /// Sent when a browser `<iframe>`'s location changes.
    LocationChange(String, bool, bool),
    /// Sent when window.open() is called within a browser `<iframe>`.
    OpenWindow,
    /// Sent when the SSL state changes within a browser `<iframe>`.
    SecurityChange(HttpsState),
    /// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
    ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
    /// Sent when the document.title changes within a browser `<iframe>`.
    TitleChange(String),
    /// Sent when an HTTP authentification is requested.
    UsernameAndPasswordRequired,
    /// Sent when a link to a search engine is found.
    OpenSearch,
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenWindow => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch"
}
}
} | Error,
/// Sent when the favicon of a browser `<iframe>` changes. | random_line_split |
use std::collections::HashMap;
use std::io::prelude::*;
use std::fs::File;
use std::path::Path;
use rustc_serialize::json;
use core::dependency::{Dependency, DependencyInner, Kind};
use core::{SourceId, Summary, PackageId, Registry};
use sources::registry::{RegistryPackage, RegistryDependency, INDEX_LOCK};
use util::{CargoResult, ChainError, internal, Filesystem, Config};
pub struct RegistryIndex<'cfg> {
    /// Identifier of the registry source; used when constructing `PackageId`s
    /// and consulted for `--precise` pins.
    source_id: SourceId,
    /// Root of the on-disk index checkout (also holds the index lock file).
    path: Filesystem,
    /// Per-crate-name memoized parse results; the bool is "yanked".
    cache: HashMap<String, Vec<(Summary, bool)>>,
    hashes: HashMap<(String, String), String>, // (name, vers) => cksum
    config: &'cfg Config,
    /// Whether reads should take a shared file lock on the index.
    locked: bool,
}
impl<'cfg> RegistryIndex<'cfg> {
    /// Creates an empty (nothing cached yet) index view rooted at `path`
    /// for the registry source `id`. `locked` controls whether reads take a
    /// shared file lock on the index.
    pub fn new(id: &SourceId,
               path: &Filesystem,
               config: &'cfg Config,
               locked: bool) -> RegistryIndex<'cfg> {
        RegistryIndex {
            source_id: id.clone(),
            path: path.clone(),
            cache: HashMap::new(),
            hashes: HashMap::new(),
            config: config,
            locked: locked,
        }
    }
/// Return the hash listed for a specified PackageId.
pub fn hash(&mut self, pkg: &PackageId) -> CargoResult<String> {
let key = (pkg.name().to_string(), pkg.version().to_string());
if let Some(s) = self.hashes.get(&key) {
return Ok(s.clone())
}
// Ok, we're missing the key, so parse the index file to load it.
self.summaries(pkg.name())?;
self.hashes.get(&key).chain_error(|| {
internal(format!("no hash listed for {}", pkg))
}).map(|s| s.clone())
}
    /// Parse the on-disk metadata for the package provided
    ///
    /// Returns a list of pairs of (summary, yanked) for the package name
    /// specified.
    pub fn summaries(&mut self, name: &str) -> CargoResult<&Vec<(Summary, bool)>> {
        // Memoized: each crate's index file is parsed at most once.
        // (Written as contains_key + get, presumably so no borrow of
        // `self.cache` is held across `load_summaries` below — confirm.)
        if self.cache.contains_key(name) {
            return Ok(self.cache.get(name).unwrap());
        }
        let summaries = self.load_summaries(name)?;
        // `load_summaries` locates the file by lowercased name, so the file
        // may contain entries for other names; keep exact matches only.
        let summaries = summaries.into_iter().filter(|summary| {
            summary.0.package_id().name() == name
        }).collect();
        self.cache.insert(name.to_string(), summaries);
        Ok(self.cache.get(name).unwrap())
    }
fn load_summaries(&mut self, name: &str) -> CargoResult<Vec<(Summary, bool)>> {
let (path, _lock) = if self.locked {
let lock = self.path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index");
match lock {
Ok(lock) => {
(lock.path().parent().unwrap().to_path_buf(), Some(lock))
}
Err(_) => return Ok(Vec::new()),
}
} else {
(self.path.clone().into_path_unlocked(), None)
};
let fs_name = name.chars().flat_map(|c| {
c.to_lowercase()
}).collect::<String>();
// see module comment for why this is structured the way it is
let path = match fs_name.len() {
1 => path.join("1").join(&fs_name),
2 => path.join("2").join(&fs_name),
3 => path.join("3").join(&fs_name[..1]).join(&fs_name),
_ => path.join(&fs_name[0..2])
.join(&fs_name[2..4])
.join(&fs_name),
};
match File::open(&path) {
Ok(mut f) => {
let mut contents = String::new();
f.read_to_string(&mut contents)?;
let ret: CargoResult<Vec<(Summary, bool)>>;
ret = contents.lines().filter(|l| l.trim().len() > 0)
.map(|l| self.parse_registry_package(l))
.collect();
ret.chain_error(|| {
internal(format!("failed to parse registry's information \
for: {}", name))
})
}
Err(..) => Ok(Vec::new()),
}
}
    /// Parse a line from the registry's index file into a Summary for a
    /// package.
    ///
    /// The returned boolean is whether or not the summary has been yanked
    /// (absent in the JSON means "not yanked").
    fn parse_registry_package(&mut self, line: &str)
                              -> CargoResult<(Summary, bool)> {
        let RegistryPackage {
            name, vers, cksum, deps, features, yanked
        } = json::decode::<RegistryPackage>(line)?;
        let pkgid = PackageId::new(&name, &vers, &self.source_id)?;
        let deps: CargoResult<Vec<Dependency>> = deps.into_iter().map(|dep| {
            self.parse_registry_dependency(dep)
        }).collect();
        let deps = deps?;
        let summary = Summary::new(pkgid, deps, features)?;
        let summary = summary.set_checksum(cksum.clone());
        // Record the checksum so later `hash` lookups for this exact
        // name/version pair are served from memory.
        self.hashes.insert((name, vers), cksum);
        Ok((summary, yanked.unwrap_or(false)))
    }
    /// Converts an encoded dependency in the registry to a cargo dependency
    fn parse_registry_dependency(&self, dep: RegistryDependency)
                                 -> CargoResult<Dependency> {
        let RegistryDependency {
            name, req, features, optional, default_features, target, kind
        } = dep;
        let dep = DependencyInner::parse(&name, Some(&req), &self.source_id, None)?;
        // Absent or unrecognized kinds fall back to a normal dependency.
        let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") {
            "dev" => Kind::Development,
            "build" => Kind::Build,
            _ => Kind::Normal,
        };
        let platform = match target {
            Some(target) => Some(target.parse()?),
            None => None,
        };
        // Unfortunately older versions of cargo and/or the registry ended up
        // publishing lots of entries where the features array contained the
        // empty feature, "", inside. This confuses the resolution process much
        // later on and these features aren't actually valid, so filter them all
        // out here.
        let features = features.into_iter().filter(|s| !s.is_empty()).collect();
        Ok(dep.set_optional(optional)
              .set_default_features(default_features)
              .set_features(features)
              .set_platform(platform)
              .set_kind(kind)
              .into_dependency())
    }
}
impl<'cfg> Registry for RegistryIndex<'cfg> {
    fn query(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
        let mut summaries = {
            let summaries = self.summaries(dep.name())?;
            // Yanked versions are hidden unless this source carries a
            // `precise` pin (which may explicitly name a yanked version).
            summaries.iter().filter(|&&(_, yanked)| {
                dep.source_id().precise().is_some() ||!yanked
            }).map(|s| s.0.clone()).collect::<Vec<_>>()
        };
        // Handle `cargo update --precise` here. If specified, our own source
        // will have a precise version listed of the form `<pkg>=<req>` where
        // `<pkg>` is the name of a crate on this source and `<req>` is the
        // version requested (argument to `--precise`).
        summaries.retain(|s| {
            match self.source_id.precise() {
                Some(p) if p.starts_with(dep.name()) &&
                           p[dep.name().len()..].starts_with('=') => {
                    let vers = &p[dep.name().len() + 1..];
                    s.version().to_string() == vers
                }
                _ => true,
            }
        });
        // Finally narrow down to the versions matching `dep`'s requirement.
        summaries.query(dep)
    }
    fn supports_checksums(&self) -> bool {
        true
    }
} | use util::{CargoResult, ChainError, internal, Filesystem, Config}; | random_line_split |
use std::collections::HashMap;
use std::io::prelude::*;
use std::fs::File;
use std::path::Path;
use rustc_serialize::json;
use core::dependency::{Dependency, DependencyInner, Kind};
use core::{SourceId, Summary, PackageId, Registry};
use sources::registry::{RegistryPackage, RegistryDependency, INDEX_LOCK};
use util::{CargoResult, ChainError, internal, Filesystem, Config};
pub struct RegistryIndex<'cfg> {
source_id: SourceId,
path: Filesystem,
cache: HashMap<String, Vec<(Summary, bool)>>,
hashes: HashMap<(String, String), String>, // (name, vers) => cksum
config: &'cfg Config,
locked: bool,
}
impl<'cfg> RegistryIndex<'cfg> {
pub fn new(id: &SourceId,
path: &Filesystem,
config: &'cfg Config,
locked: bool) -> RegistryIndex<'cfg> {
RegistryIndex {
source_id: id.clone(),
path: path.clone(),
cache: HashMap::new(),
hashes: HashMap::new(),
config: config,
locked: locked,
}
}
/// Return the hash listed for a specified PackageId.
pub fn hash(&mut self, pkg: &PackageId) -> CargoResult<String> {
let key = (pkg.name().to_string(), pkg.version().to_string());
if let Some(s) = self.hashes.get(&key) {
return Ok(s.clone())
}
// Ok, we're missing the key, so parse the index file to load it.
self.summaries(pkg.name())?;
self.hashes.get(&key).chain_error(|| {
internal(format!("no hash listed for {}", pkg))
}).map(|s| s.clone())
}
/// Parse the on-disk metadata for the package provided
///
/// Returns a list of pairs of (summary, yanked) for the package name
/// specified.
pub fn | (&mut self, name: &str) -> CargoResult<&Vec<(Summary, bool)>> {
if self.cache.contains_key(name) {
return Ok(self.cache.get(name).unwrap());
}
let summaries = self.load_summaries(name)?;
let summaries = summaries.into_iter().filter(|summary| {
summary.0.package_id().name() == name
}).collect();
self.cache.insert(name.to_string(), summaries);
Ok(self.cache.get(name).unwrap())
}
fn load_summaries(&mut self, name: &str) -> CargoResult<Vec<(Summary, bool)>> {
let (path, _lock) = if self.locked {
let lock = self.path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index");
match lock {
Ok(lock) => {
(lock.path().parent().unwrap().to_path_buf(), Some(lock))
}
Err(_) => return Ok(Vec::new()),
}
} else {
(self.path.clone().into_path_unlocked(), None)
};
let fs_name = name.chars().flat_map(|c| {
c.to_lowercase()
}).collect::<String>();
// see module comment for why this is structured the way it is
let path = match fs_name.len() {
1 => path.join("1").join(&fs_name),
2 => path.join("2").join(&fs_name),
3 => path.join("3").join(&fs_name[..1]).join(&fs_name),
_ => path.join(&fs_name[0..2])
.join(&fs_name[2..4])
.join(&fs_name),
};
match File::open(&path) {
Ok(mut f) => {
let mut contents = String::new();
f.read_to_string(&mut contents)?;
let ret: CargoResult<Vec<(Summary, bool)>>;
ret = contents.lines().filter(|l| l.trim().len() > 0)
.map(|l| self.parse_registry_package(l))
.collect();
ret.chain_error(|| {
internal(format!("failed to parse registry's information \
for: {}", name))
})
}
Err(..) => Ok(Vec::new()),
}
}
/// Parse a line from the registry's index file into a Summary for a
/// package.
///
/// The returned boolean is whether or not the summary has been yanked.
fn parse_registry_package(&mut self, line: &str)
-> CargoResult<(Summary, bool)> {
let RegistryPackage {
name, vers, cksum, deps, features, yanked
} = json::decode::<RegistryPackage>(line)?;
let pkgid = PackageId::new(&name, &vers, &self.source_id)?;
let deps: CargoResult<Vec<Dependency>> = deps.into_iter().map(|dep| {
self.parse_registry_dependency(dep)
}).collect();
let deps = deps?;
let summary = Summary::new(pkgid, deps, features)?;
let summary = summary.set_checksum(cksum.clone());
self.hashes.insert((name, vers), cksum);
Ok((summary, yanked.unwrap_or(false)))
}
/// Converts an encoded dependency in the registry to a cargo dependency
fn parse_registry_dependency(&self, dep: RegistryDependency)
-> CargoResult<Dependency> {
let RegistryDependency {
name, req, features, optional, default_features, target, kind
} = dep;
let dep = DependencyInner::parse(&name, Some(&req), &self.source_id, None)?;
let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") {
"dev" => Kind::Development,
"build" => Kind::Build,
_ => Kind::Normal,
};
let platform = match target {
Some(target) => Some(target.parse()?),
None => None,
};
// Unfortunately older versions of cargo and/or the registry ended up
// publishing lots of entries where the features array contained the
// empty feature, "", inside. This confuses the resolution process much
// later on and these features aren't actually valid, so filter them all
// out here.
let features = features.into_iter().filter(|s|!s.is_empty()).collect();
Ok(dep.set_optional(optional)
.set_default_features(default_features)
.set_features(features)
.set_platform(platform)
.set_kind(kind)
.into_dependency())
}
}
impl<'cfg> Registry for RegistryIndex<'cfg> {
fn query(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
let mut summaries = {
let summaries = self.summaries(dep.name())?;
summaries.iter().filter(|&&(_, yanked)| {
dep.source_id().precise().is_some() ||!yanked
}).map(|s| s.0.clone()).collect::<Vec<_>>()
};
// Handle `cargo update --precise` here. If specified, our own source
// will have a precise version listed of the form `<pkg>=<req>` where
// `<pkg>` is the name of a crate on this source and `<req>` is the
// version requested (agument to `--precise`).
summaries.retain(|s| {
match self.source_id.precise() {
Some(p) if p.starts_with(dep.name()) &&
p[dep.name().len()..].starts_with('=') => {
let vers = &p[dep.name().len() + 1..];
s.version().to_string() == vers
}
_ => true,
}
});
summaries.query(dep)
}
fn supports_checksums(&self) -> bool {
true
}
}
| summaries | identifier_name |
use std::collections::HashMap;
use std::io::prelude::*;
use std::fs::File;
use std::path::Path;
use rustc_serialize::json;
use core::dependency::{Dependency, DependencyInner, Kind};
use core::{SourceId, Summary, PackageId, Registry};
use sources::registry::{RegistryPackage, RegistryDependency, INDEX_LOCK};
use util::{CargoResult, ChainError, internal, Filesystem, Config};
pub struct RegistryIndex<'cfg> {
source_id: SourceId,
path: Filesystem,
cache: HashMap<String, Vec<(Summary, bool)>>,
hashes: HashMap<(String, String), String>, // (name, vers) => cksum
config: &'cfg Config,
locked: bool,
}
impl<'cfg> RegistryIndex<'cfg> {
pub fn new(id: &SourceId,
path: &Filesystem,
config: &'cfg Config,
locked: bool) -> RegistryIndex<'cfg> {
RegistryIndex {
source_id: id.clone(),
path: path.clone(),
cache: HashMap::new(),
hashes: HashMap::new(),
config: config,
locked: locked,
}
}
/// Return the hash listed for a specified PackageId.
pub fn hash(&mut self, pkg: &PackageId) -> CargoResult<String> {
let key = (pkg.name().to_string(), pkg.version().to_string());
if let Some(s) = self.hashes.get(&key) {
return Ok(s.clone())
}
// Ok, we're missing the key, so parse the index file to load it.
self.summaries(pkg.name())?;
self.hashes.get(&key).chain_error(|| {
internal(format!("no hash listed for {}", pkg))
}).map(|s| s.clone())
}
/// Parse the on-disk metadata for the package provided
///
/// Returns a list of pairs of (summary, yanked) for the package name
/// specified.
pub fn summaries(&mut self, name: &str) -> CargoResult<&Vec<(Summary, bool)>> {
if self.cache.contains_key(name) {
return Ok(self.cache.get(name).unwrap());
}
let summaries = self.load_summaries(name)?;
let summaries = summaries.into_iter().filter(|summary| {
summary.0.package_id().name() == name
}).collect();
self.cache.insert(name.to_string(), summaries);
Ok(self.cache.get(name).unwrap())
}
fn load_summaries(&mut self, name: &str) -> CargoResult<Vec<(Summary, bool)>> | let path = match fs_name.len() {
1 => path.join("1").join(&fs_name),
2 => path.join("2").join(&fs_name),
3 => path.join("3").join(&fs_name[..1]).join(&fs_name),
_ => path.join(&fs_name[0..2])
.join(&fs_name[2..4])
.join(&fs_name),
};
match File::open(&path) {
Ok(mut f) => {
let mut contents = String::new();
f.read_to_string(&mut contents)?;
let ret: CargoResult<Vec<(Summary, bool)>>;
ret = contents.lines().filter(|l| l.trim().len() > 0)
.map(|l| self.parse_registry_package(l))
.collect();
ret.chain_error(|| {
internal(format!("failed to parse registry's information \
for: {}", name))
})
}
Err(..) => Ok(Vec::new()),
}
}
/// Parse a line from the registry's index file into a Summary for a
/// package.
///
/// The returned boolean is whether or not the summary has been yanked.
fn parse_registry_package(&mut self, line: &str)
-> CargoResult<(Summary, bool)> {
let RegistryPackage {
name, vers, cksum, deps, features, yanked
} = json::decode::<RegistryPackage>(line)?;
let pkgid = PackageId::new(&name, &vers, &self.source_id)?;
let deps: CargoResult<Vec<Dependency>> = deps.into_iter().map(|dep| {
self.parse_registry_dependency(dep)
}).collect();
let deps = deps?;
let summary = Summary::new(pkgid, deps, features)?;
let summary = summary.set_checksum(cksum.clone());
self.hashes.insert((name, vers), cksum);
Ok((summary, yanked.unwrap_or(false)))
}
/// Converts an encoded dependency in the registry to a cargo dependency
fn parse_registry_dependency(&self, dep: RegistryDependency)
-> CargoResult<Dependency> {
let RegistryDependency {
name, req, features, optional, default_features, target, kind
} = dep;
let dep = DependencyInner::parse(&name, Some(&req), &self.source_id, None)?;
let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") {
"dev" => Kind::Development,
"build" => Kind::Build,
_ => Kind::Normal,
};
let platform = match target {
Some(target) => Some(target.parse()?),
None => None,
};
// Unfortunately older versions of cargo and/or the registry ended up
// publishing lots of entries where the features array contained the
// empty feature, "", inside. This confuses the resolution process much
// later on and these features aren't actually valid, so filter them all
// out here.
let features = features.into_iter().filter(|s|!s.is_empty()).collect();
Ok(dep.set_optional(optional)
.set_default_features(default_features)
.set_features(features)
.set_platform(platform)
.set_kind(kind)
.into_dependency())
}
}
impl<'cfg> Registry for RegistryIndex<'cfg> {
fn query(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
let mut summaries = {
let summaries = self.summaries(dep.name())?;
summaries.iter().filter(|&&(_, yanked)| {
dep.source_id().precise().is_some() ||!yanked
}).map(|s| s.0.clone()).collect::<Vec<_>>()
};
// Handle `cargo update --precise` here. If specified, our own source
// will have a precise version listed of the form `<pkg>=<req>` where
// `<pkg>` is the name of a crate on this source and `<req>` is the
// version requested (agument to `--precise`).
summaries.retain(|s| {
match self.source_id.precise() {
Some(p) if p.starts_with(dep.name()) &&
p[dep.name().len()..].starts_with('=') => {
let vers = &p[dep.name().len() + 1..];
s.version().to_string() == vers
}
_ => true,
}
});
summaries.query(dep)
}
fn supports_checksums(&self) -> bool {
true
}
}
| {
let (path, _lock) = if self.locked {
let lock = self.path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index");
match lock {
Ok(lock) => {
(lock.path().parent().unwrap().to_path_buf(), Some(lock))
}
Err(_) => return Ok(Vec::new()),
}
} else {
(self.path.clone().into_path_unlocked(), None)
};
let fs_name = name.chars().flat_map(|c| {
c.to_lowercase()
}).collect::<String>();
// see module comment for why this is structured the way it is | identifier_body |
string_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use libc::{c_int};
use std::slice;
use string::cef_string_utf16_set;
use types::{cef_string_list_t,cef_string_t};
use rustc_unicode::str::Utf16Encoder;
//cef_string_list
#[no_mangle]
pub extern "C" fn cef_string_list_alloc() -> *mut cef_string_list_t |
#[no_mangle]
pub extern "C" fn cef_string_list_size(lt: *mut cef_string_list_t) -> c_int {
unsafe {
if lt.is_null() { return 0; }
(*lt).len() as c_int
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_append(lt: *mut cef_string_list_t, value: *const cef_string_t) {
unsafe {
if lt.is_null() { return; }
(*lt).push(String::from_utf16(slice::from_raw_parts((*value).str, (*value).length as usize)).unwrap());
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_value(lt: *mut cef_string_list_t, index: c_int, value: *mut cef_string_t) -> c_int {
unsafe {
if index < 0 || lt.is_null() { return 0; }
if index as usize > (*lt).len() - 1 { return 0; }
let ref string = (*lt)[index as usize];
let utf16_chars: Vec<u16> = Utf16Encoder::new(string.chars()).collect();
cef_string_utf16_set(utf16_chars.as_ptr(), utf16_chars.len() as u64, value, 1)
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_clear(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
(*lt).clear();
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_free(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
cef_string_list_clear(lt);
drop(Box::from_raw(lt));
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_copy(lt: *mut cef_string_list_t) -> *mut cef_string_list_t {
unsafe {
if lt.is_null() { return 0 as *mut cef_string_list_t; }
let copy = (*lt).clone();
Box::into_raw(box copy)
}
}
| {
Box::into_raw(box vec!())
} | identifier_body |
string_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use libc::{c_int};
use std::slice;
use string::cef_string_utf16_set;
use types::{cef_string_list_t,cef_string_t};
use rustc_unicode::str::Utf16Encoder;
//cef_string_list
#[no_mangle]
pub extern "C" fn cef_string_list_alloc() -> *mut cef_string_list_t {
Box::into_raw(box vec!())
}
#[no_mangle]
pub extern "C" fn cef_string_list_size(lt: *mut cef_string_list_t) -> c_int {
unsafe {
if lt.is_null() { return 0; }
(*lt).len() as c_int
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_append(lt: *mut cef_string_list_t, value: *const cef_string_t) {
unsafe {
if lt.is_null() { return; }
(*lt).push(String::from_utf16(slice::from_raw_parts((*value).str, (*value).length as usize)).unwrap());
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_value(lt: *mut cef_string_list_t, index: c_int, value: *mut cef_string_t) -> c_int {
unsafe {
if index < 0 || lt.is_null() { return 0; }
if index as usize > (*lt).len() - 1 { return 0; }
let ref string = (*lt)[index as usize];
let utf16_chars: Vec<u16> = Utf16Encoder::new(string.chars()).collect();
cef_string_utf16_set(utf16_chars.as_ptr(), utf16_chars.len() as u64, value, 1)
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_clear(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
(*lt).clear();
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_free(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
cef_string_list_clear(lt);
drop(Box::from_raw(lt));
}
}
#[no_mangle]
pub extern "C" fn | (lt: *mut cef_string_list_t) -> *mut cef_string_list_t {
unsafe {
if lt.is_null() { return 0 as *mut cef_string_list_t; }
let copy = (*lt).clone();
Box::into_raw(box copy)
}
}
| cef_string_list_copy | identifier_name |
string_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use libc::{c_int};
use std::slice;
use string::cef_string_utf16_set;
use types::{cef_string_list_t,cef_string_t};
use rustc_unicode::str::Utf16Encoder;
//cef_string_list
#[no_mangle]
pub extern "C" fn cef_string_list_alloc() -> *mut cef_string_list_t {
Box::into_raw(box vec!())
}
#[no_mangle]
pub extern "C" fn cef_string_list_size(lt: *mut cef_string_list_t) -> c_int {
unsafe {
if lt.is_null() { return 0; }
(*lt).len() as c_int
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_append(lt: *mut cef_string_list_t, value: *const cef_string_t) {
unsafe {
if lt.is_null() { return; }
(*lt).push(String::from_utf16(slice::from_raw_parts((*value).str, (*value).length as usize)).unwrap());
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_value(lt: *mut cef_string_list_t, index: c_int, value: *mut cef_string_t) -> c_int {
unsafe {
if index < 0 || lt.is_null() { return 0; }
if index as usize > (*lt).len() - 1 |
let ref string = (*lt)[index as usize];
let utf16_chars: Vec<u16> = Utf16Encoder::new(string.chars()).collect();
cef_string_utf16_set(utf16_chars.as_ptr(), utf16_chars.len() as u64, value, 1)
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_clear(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
(*lt).clear();
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_free(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
cef_string_list_clear(lt);
drop(Box::from_raw(lt));
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_copy(lt: *mut cef_string_list_t) -> *mut cef_string_list_t {
unsafe {
if lt.is_null() { return 0 as *mut cef_string_list_t; }
let copy = (*lt).clone();
Box::into_raw(box copy)
}
}
| { return 0; } | conditional_block |
string_list.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use libc::{c_int};
use std::slice;
use string::cef_string_utf16_set;
use types::{cef_string_list_t,cef_string_t};
use rustc_unicode::str::Utf16Encoder;
//cef_string_list
#[no_mangle]
pub extern "C" fn cef_string_list_alloc() -> *mut cef_string_list_t {
Box::into_raw(box vec!())
}
| pub extern "C" fn cef_string_list_size(lt: *mut cef_string_list_t) -> c_int {
unsafe {
if lt.is_null() { return 0; }
(*lt).len() as c_int
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_append(lt: *mut cef_string_list_t, value: *const cef_string_t) {
unsafe {
if lt.is_null() { return; }
(*lt).push(String::from_utf16(slice::from_raw_parts((*value).str, (*value).length as usize)).unwrap());
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_value(lt: *mut cef_string_list_t, index: c_int, value: *mut cef_string_t) -> c_int {
unsafe {
if index < 0 || lt.is_null() { return 0; }
if index as usize > (*lt).len() - 1 { return 0; }
let ref string = (*lt)[index as usize];
let utf16_chars: Vec<u16> = Utf16Encoder::new(string.chars()).collect();
cef_string_utf16_set(utf16_chars.as_ptr(), utf16_chars.len() as u64, value, 1)
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_clear(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
(*lt).clear();
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_free(lt: *mut cef_string_list_t) {
unsafe {
if lt.is_null() { return; }
cef_string_list_clear(lt);
drop(Box::from_raw(lt));
}
}
#[no_mangle]
pub extern "C" fn cef_string_list_copy(lt: *mut cef_string_list_t) -> *mut cef_string_list_t {
unsafe {
if lt.is_null() { return 0 as *mut cef_string_list_t; }
let copy = (*lt).clone();
Box::into_raw(box copy)
}
} | #[no_mangle] | random_line_split |
coord_units.rs | //! `userSpaceOnUse` or `objectBoundingBox` values.
use cssparser::Parser;
use crate::error::*;
use crate::parsers::Parse;
/// Defines the units to be used for things that can consider a
/// coordinate system in terms of the current transformation, or in
/// terms of the current object's bounding box.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CoordUnits {
UserSpaceOnUse,
ObjectBoundingBox,
}
impl Parse for CoordUnits {
fn parse<'i>(parser: &mut Parser<'i, '_>) -> Result<Self, ParseError<'i>> {
Ok(parse_identifiers!(
parser,
"userSpaceOnUse" => CoordUnits::UserSpaceOnUse,
"objectBoundingBox" => CoordUnits::ObjectBoundingBox,
)?)
}
}
/// Creates a newtype around `CoordUnits`, with a default value.
///
/// SVG attributes that can take `userSpaceOnUse` or
/// `objectBoundingBox` values often have different default values
/// depending on the type of SVG element. We use this macro to create
/// a newtype for each SVG element and attribute that requires values
/// of this type. The newtype provides an `impl Default` with the
/// specified `$default` value.
#[macro_export]
macro_rules! coord_units {
($name:ident, $default:expr) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct $name(pub CoordUnits);
impl Default for $name {
fn default() -> Self {
$name($default)
}
}
impl From<$name> for CoordUnits {
fn from(u: $name) -> Self {
u.0
}
}
impl $crate::parsers::Parse for $name {
fn parse<'i>(
parser: &mut ::cssparser::Parser<'i, '_>,
) -> Result<Self, $crate::error::ParseError<'i>> {
Ok($name($crate::coord_units::CoordUnits::parse(parser)?))
}
}
};
}
#[cfg(test)]
mod tests {
use super::*;
coord_units!(MyUnits, CoordUnits::ObjectBoundingBox);
#[test]
fn parsing_invalid_strings_yields_error() {
assert!(MyUnits::parse_str("").is_err());
assert!(MyUnits::parse_str("foo").is_err());
}
#[test]
fn parses_paint_server_units() {
assert_eq!(
MyUnits::parse_str("userSpaceOnUse").unwrap(),
MyUnits(CoordUnits::UserSpaceOnUse)
);
assert_eq!(
MyUnits::parse_str("objectBoundingBox").unwrap(),
MyUnits(CoordUnits::ObjectBoundingBox)
);
}
#[test]
fn has_correct_default() {
assert_eq!(MyUnits::default(), MyUnits(CoordUnits::ObjectBoundingBox));
} |
#[test]
fn converts_to_coord_units() {
assert_eq!(
CoordUnits::from(MyUnits(CoordUnits::ObjectBoundingBox)),
CoordUnits::ObjectBoundingBox
);
}
} | random_line_split |
|
coord_units.rs | //! `userSpaceOnUse` or `objectBoundingBox` values.
use cssparser::Parser;
use crate::error::*;
use crate::parsers::Parse;
/// Defines the units to be used for things that can consider a
/// coordinate system in terms of the current transformation, or in
/// terms of the current object's bounding box.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CoordUnits {
UserSpaceOnUse,
ObjectBoundingBox,
}
impl Parse for CoordUnits {
fn parse<'i>(parser: &mut Parser<'i, '_>) -> Result<Self, ParseError<'i>> {
Ok(parse_identifiers!(
parser,
"userSpaceOnUse" => CoordUnits::UserSpaceOnUse,
"objectBoundingBox" => CoordUnits::ObjectBoundingBox,
)?)
}
}
/// Creates a newtype around `CoordUnits`, with a default value.
///
/// SVG attributes that can take `userSpaceOnUse` or
/// `objectBoundingBox` values often have different default values
/// depending on the type of SVG element. We use this macro to create
/// a newtype for each SVG element and attribute that requires values
/// of this type. The newtype provides an `impl Default` with the
/// specified `$default` value.
#[macro_export]
macro_rules! coord_units {
($name:ident, $default:expr) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct $name(pub CoordUnits);
impl Default for $name {
fn default() -> Self {
$name($default)
}
}
impl From<$name> for CoordUnits {
fn from(u: $name) -> Self {
u.0
}
}
impl $crate::parsers::Parse for $name {
fn parse<'i>(
parser: &mut ::cssparser::Parser<'i, '_>,
) -> Result<Self, $crate::error::ParseError<'i>> {
Ok($name($crate::coord_units::CoordUnits::parse(parser)?))
}
}
};
}
#[cfg(test)]
mod tests {
use super::*;
coord_units!(MyUnits, CoordUnits::ObjectBoundingBox);
#[test]
fn | () {
assert!(MyUnits::parse_str("").is_err());
assert!(MyUnits::parse_str("foo").is_err());
}
#[test]
fn parses_paint_server_units() {
assert_eq!(
MyUnits::parse_str("userSpaceOnUse").unwrap(),
MyUnits(CoordUnits::UserSpaceOnUse)
);
assert_eq!(
MyUnits::parse_str("objectBoundingBox").unwrap(),
MyUnits(CoordUnits::ObjectBoundingBox)
);
}
#[test]
fn has_correct_default() {
assert_eq!(MyUnits::default(), MyUnits(CoordUnits::ObjectBoundingBox));
}
#[test]
fn converts_to_coord_units() {
assert_eq!(
CoordUnits::from(MyUnits(CoordUnits::ObjectBoundingBox)),
CoordUnits::ObjectBoundingBox
);
}
}
| parsing_invalid_strings_yields_error | identifier_name |
coord_units.rs | //! `userSpaceOnUse` or `objectBoundingBox` values.
use cssparser::Parser;
use crate::error::*;
use crate::parsers::Parse;
/// Defines the units to be used for things that can consider a
/// coordinate system in terms of the current transformation, or in
/// terms of the current object's bounding box.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CoordUnits {
UserSpaceOnUse,
ObjectBoundingBox,
}
impl Parse for CoordUnits {
fn parse<'i>(parser: &mut Parser<'i, '_>) -> Result<Self, ParseError<'i>> {
Ok(parse_identifiers!(
parser,
"userSpaceOnUse" => CoordUnits::UserSpaceOnUse,
"objectBoundingBox" => CoordUnits::ObjectBoundingBox,
)?)
}
}
/// Creates a newtype around `CoordUnits`, with a default value.
///
/// SVG attributes that can take `userSpaceOnUse` or
/// `objectBoundingBox` values often have different default values
/// depending on the type of SVG element. We use this macro to create
/// a newtype for each SVG element and attribute that requires values
/// of this type. The newtype provides an `impl Default` with the
/// specified `$default` value.
#[macro_export]
macro_rules! coord_units {
($name:ident, $default:expr) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct $name(pub CoordUnits);
impl Default for $name {
fn default() -> Self {
$name($default)
}
}
impl From<$name> for CoordUnits {
fn from(u: $name) -> Self {
u.0
}
}
impl $crate::parsers::Parse for $name {
fn parse<'i>(
parser: &mut ::cssparser::Parser<'i, '_>,
) -> Result<Self, $crate::error::ParseError<'i>> {
Ok($name($crate::coord_units::CoordUnits::parse(parser)?))
}
}
};
}
#[cfg(test)]
mod tests {
use super::*;
coord_units!(MyUnits, CoordUnits::ObjectBoundingBox);
#[test]
fn parsing_invalid_strings_yields_error() {
assert!(MyUnits::parse_str("").is_err());
assert!(MyUnits::parse_str("foo").is_err());
}
#[test]
fn parses_paint_server_units() |
#[test]
fn has_correct_default() {
assert_eq!(MyUnits::default(), MyUnits(CoordUnits::ObjectBoundingBox));
}
#[test]
fn converts_to_coord_units() {
assert_eq!(
CoordUnits::from(MyUnits(CoordUnits::ObjectBoundingBox)),
CoordUnits::ObjectBoundingBox
);
}
}
| {
assert_eq!(
MyUnits::parse_str("userSpaceOnUse").unwrap(),
MyUnits(CoordUnits::UserSpaceOnUse)
);
assert_eq!(
MyUnits::parse_str("objectBoundingBox").unwrap(),
MyUnits(CoordUnits::ObjectBoundingBox)
);
} | identifier_body |
fileapi.rs | // Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
//! ApiSet Contract for api-ms-win-core-file-l1
pub const CREATE_NEW: ::DWORD = 1;
pub const CREATE_ALWAYS: ::DWORD = 2;
pub const OPEN_EXISTING: ::DWORD = 3;
pub const OPEN_ALWAYS: ::DWORD = 4;
pub const TRUNCATE_EXISTING: ::DWORD = 5;
pub const INVALID_FILE_SIZE: ::DWORD = 0xFFFFFFFF;
pub const INVALID_SET_FILE_POINTER: ::DWORD = 0xFFFFFFFF;
pub const INVALID_FILE_ATTRIBUTES: ::DWORD = 0xFFFFFFFF;
#[repr(C)] #[derive(Clone, Copy, Debug)]
pub struct W | {
pub dwFileAttributes: ::DWORD,
pub ftCreationTime: ::FILETIME,
pub ftLastAccessTime: ::FILETIME,
pub ftLastWriteTime: ::FILETIME,
pub nFileSizeHigh: ::DWORD,
pub nFileSizeLow: ::DWORD,
}
pub type LPWIN32_FILE_ATTRIBUTE_DATA = *mut WIN32_FILE_ATTRIBUTE_DATA;
#[repr(C)] #[derive(Clone, Copy, Debug)]
pub struct BY_HANDLE_FILE_INFORMATION {
pub dwFileAttributes: ::DWORD,
pub ftCreationTime: ::FILETIME,
pub ftLastAccessTime: ::FILETIME,
pub ftLastWriteTime: ::FILETIME,
pub dwVolumeSerialNumber: ::DWORD,
pub nFileSizeHigh: ::DWORD,
pub nFileSizeLow: ::DWORD,
pub nNumberOfLinks: ::DWORD,
pub nFileIndexHigh: ::DWORD,
pub nFileIndexLow: ::DWORD,
}
pub type PBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
#[repr(C)] #[derive(Clone, Copy, Debug)]
pub struct CREATEFILE2_EXTENDED_PARAMETERS {
pub dwSize: ::DWORD,
pub dwFileAttributes: ::DWORD,
pub dwFileFlags: ::DWORD,
pub dwSecurityQosFlags: ::DWORD,
pub lpSecurityAttributes: ::LPSECURITY_ATTRIBUTES,
pub hTemplateFile: ::HANDLE,
}
pub type PCREATEFILE2_EXTENDED_PARAMETERS = *mut CREATEFILE2_EXTENDED_PARAMETERS;
pub type LPCREATEFILE2_EXTENDED_PARAMETERS = *mut CREATEFILE2_EXTENDED_PARAMETERS;
| IN32_FILE_ATTRIBUTE_DATA | identifier_name |
fileapi.rs | // Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
//! ApiSet Contract for api-ms-win-core-file-l1
pub const CREATE_NEW: ::DWORD = 1;
pub const CREATE_ALWAYS: ::DWORD = 2;
pub const OPEN_EXISTING: ::DWORD = 3;
pub const OPEN_ALWAYS: ::DWORD = 4;
pub const TRUNCATE_EXISTING: ::DWORD = 5;
pub const INVALID_FILE_SIZE: ::DWORD = 0xFFFFFFFF;
pub const INVALID_SET_FILE_POINTER: ::DWORD = 0xFFFFFFFF;
pub const INVALID_FILE_ATTRIBUTES: ::DWORD = 0xFFFFFFFF;
#[repr(C)] #[derive(Clone, Copy, Debug)]
pub struct WIN32_FILE_ATTRIBUTE_DATA {
pub dwFileAttributes: ::DWORD,
pub ftCreationTime: ::FILETIME,
pub ftLastAccessTime: ::FILETIME,
pub ftLastWriteTime: ::FILETIME,
pub nFileSizeHigh: ::DWORD,
pub nFileSizeLow: ::DWORD,
}
pub type LPWIN32_FILE_ATTRIBUTE_DATA = *mut WIN32_FILE_ATTRIBUTE_DATA;
#[repr(C)] #[derive(Clone, Copy, Debug)]
pub struct BY_HANDLE_FILE_INFORMATION {
pub dwFileAttributes: ::DWORD,
pub ftCreationTime: ::FILETIME,
pub ftLastAccessTime: ::FILETIME,
pub ftLastWriteTime: ::FILETIME,
pub dwVolumeSerialNumber: ::DWORD,
pub nFileSizeHigh: ::DWORD,
pub nFileSizeLow: ::DWORD,
pub nNumberOfLinks: ::DWORD,
pub nFileIndexHigh: ::DWORD,
pub nFileIndexLow: ::DWORD,
} | pub struct CREATEFILE2_EXTENDED_PARAMETERS {
pub dwSize: ::DWORD,
pub dwFileAttributes: ::DWORD,
pub dwFileFlags: ::DWORD,
pub dwSecurityQosFlags: ::DWORD,
pub lpSecurityAttributes: ::LPSECURITY_ATTRIBUTES,
pub hTemplateFile: ::HANDLE,
}
pub type PCREATEFILE2_EXTENDED_PARAMETERS = *mut CREATEFILE2_EXTENDED_PARAMETERS;
pub type LPCREATEFILE2_EXTENDED_PARAMETERS = *mut CREATEFILE2_EXTENDED_PARAMETERS; | pub type PBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION;
#[repr(C)] #[derive(Clone, Copy, Debug)] | random_line_split |
main.rs | use std::io::BufReader;
use std::io::BufRead;
use std::path::Path;
use std::fs::OpenOptions;
use std::io::BufWriter;
use std::io::Write;
fn | () {
let mut read_options = OpenOptions::new();
read_options.read(true);
let mut write_options = OpenOptions::new();
write_options.write(true).create(true);
/* We may use Path to read/write existing file or create
new files. Hence, no error handling for Path. Erros happen
during 'open' based on path options. */
let read_file = read_options.open(Path::new("numbers.txt")).unwrap();
let file_reader = BufReader::new(&read_file);
let write_file = write_options.open(Path::new("output.txt")).unwrap();
let mut writer = BufWriter::new(&write_file);
for line in file_reader.lines(){
let line = line.unwrap();
let num = line.parse::<i32>().unwrap();
match (num % 5, num % 3){
(0, 0) => writeln!(& mut writer, "Num = {}", "FizzBuzz"),
(0, _) => writeln!(& mut writer, "Num = {}", "Fizz"),
(_, 0) => writeln!(& mut writer, "Num = {}", "Buzz"),
_ => writeln!(& mut writer, "Num = {}", num)
};
}
}
| main | identifier_name |
main.rs | use std::io::BufReader;
use std::io::BufRead;
use std::path::Path;
use std::fs::OpenOptions;
use std::io::BufWriter;
use std::io::Write;
fn main() {
let mut read_options = OpenOptions::new();
read_options.read(true);
let mut write_options = OpenOptions::new();
write_options.write(true).create(true);
/* We may use Path to read/write existing file or create
new files. Hence, no error handling for Path. Erros happen
during 'open' based on path options. */
let read_file = read_options.open(Path::new("numbers.txt")).unwrap();
let file_reader = BufReader::new(&read_file);
let write_file = write_options.open(Path::new("output.txt")).unwrap();
let mut writer = BufWriter::new(&write_file);
for line in file_reader.lines(){ | let num = line.parse::<i32>().unwrap();
match (num % 5, num % 3){
(0, 0) => writeln!(& mut writer, "Num = {}", "FizzBuzz"),
(0, _) => writeln!(& mut writer, "Num = {}", "Fizz"),
(_, 0) => writeln!(& mut writer, "Num = {}", "Buzz"),
_ => writeln!(& mut writer, "Num = {}", num)
};
}
} | let line = line.unwrap(); | random_line_split |
main.rs | use std::io::BufReader;
use std::io::BufRead;
use std::path::Path;
use std::fs::OpenOptions;
use std::io::BufWriter;
use std::io::Write;
fn main() | match (num % 5, num % 3){
(0, 0) => writeln!(& mut writer, "Num = {}", "FizzBuzz"),
(0, _) => writeln!(& mut writer, "Num = {}", "Fizz"),
(_, 0) => writeln!(& mut writer, "Num = {}", "Buzz"),
_ => writeln!(& mut writer, "Num = {}", num)
};
}
}
| {
let mut read_options = OpenOptions::new();
read_options.read(true);
let mut write_options = OpenOptions::new();
write_options.write(true).create(true);
/* We may use Path to read/write existing file or create
new files. Hence, no error handling for Path. Erros happen
during 'open' based on path options. */
let read_file = read_options.open(Path::new("numbers.txt")).unwrap();
let file_reader = BufReader::new(&read_file);
let write_file = write_options.open(Path::new("output.txt")).unwrap();
let mut writer = BufWriter::new(&write_file);
for line in file_reader.lines(){
let line = line.unwrap();
let num = line.parse::<i32>().unwrap();
| identifier_body |
constant_offsets.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use core::hash::{Hash, Hasher};
use pyo3::class::basic::CompareOp;
use pyo3::prelude::*;
use pyo3::PyObjectProtocol;
use std::collections::hash_map::DefaultHasher;
/// Contains the offsets of the displacement and immediate.
///
/// Call :class:`Decoder.get_constant_offsets` or :class:`Encoder.get_constant_offsets` to get the
/// offsets of the constants after the instruction has been decoded/encoded.
#[pyclass(module = "_iced_x86_py")]
#[text_signature = "(/)"]
#[derive(Copy, Clone)]
pub(crate) struct ConstantOffsets {
pub(crate) offsets: iced_x86::ConstantOffsets,
}
#[pymethods]
impl ConstantOffsets {
/// int: (``u32``) The offset of the displacement, if any
#[getter]
fn displacement_offset(&self) -> u32 {
self.offsets.displacement_offset() as u32
}
/// int: (``u32``) Size in bytes of the displacement, or 0 if there's no displacement
#[getter]
fn displacement_size(&self) -> u32 {
self.offsets.displacement_size() as u32
}
/// int: (``u32``) The offset of the first immediate, if any.
///
/// This field can be invalid even if the operand has an immediate if it's an immediate that isn't part
/// of the instruction stream, eg. ``SHL AL,1``.
#[getter]
fn immediate_offset(&self) -> u32 {
self.offsets.immediate_offset() as u32
}
/// int: (``u32``) Size in bytes of the first immediate, or 0 if there's no immediate
#[getter]
fn immediate_size(&self) -> u32 {
self.offsets.immediate_size() as u32
}
/// int: (``u32``) The offset of the second immediate, if any.
#[getter]
fn | (&self) -> u32 {
self.offsets.immediate_offset2() as u32
}
/// int: (``u32``) Size in bytes of the second immediate, or 0 if there's no second immediate
#[getter]
fn immediate_size2(&self) -> u32 {
self.offsets.immediate_size2() as u32
}
/// bool: ``True`` if :class:`ConstantOffsets.displacement_offset` and :class:`ConstantOffsets.displacement_size` are valid
#[getter]
fn has_displacement(&self) -> bool {
self.offsets.has_displacement()
}
/// bool: ``True`` if :class:`ConstantOffsets.immediate_offset` and :class:`ConstantOffsets.immediate_size` are valid
#[getter]
fn has_immediate(&self) -> bool {
self.offsets.has_immediate()
}
/// bool: ``True`` if :class:`ConstantOffsets.immediate_offset2` and :class:`ConstantOffsets.immediate_size2` are valid
#[getter]
fn has_immediate2(&self) -> bool {
self.offsets.has_immediate2()
}
/// Returns a copy of this instance.
///
/// Returns:
/// ConstantOffsets: A copy of this instance
///
/// This is identical to :class:`ConstantOffsets.copy`
#[text_signature = "($self, /)"]
fn __copy__(&self) -> Self {
*self
}
/// Returns a copy of this instance.
///
/// Args:
/// memo (Any): memo dict
///
/// Returns:
/// ConstantOffsets: A copy of this instance
///
/// This is identical to :class:`ConstantOffsets.copy`
#[text_signature = "($self, memo, /)"]
fn __deepcopy__(&self, _memo: &PyAny) -> Self {
*self
}
/// Returns a copy of this instance.
///
/// Returns:
/// ConstantOffsets: A copy of this instance
#[text_signature = "($self, /)"]
fn copy(&self) -> Self {
*self
}
}
#[pyproto]
impl PyObjectProtocol for ConstantOffsets {
fn __richcmp__(&self, other: PyRef<ConstantOffsets>, op: CompareOp) -> PyObject {
match op {
CompareOp::Eq => (self.offsets == other.offsets).into_py(other.py()),
CompareOp::Ne => (self.offsets!= other.offsets).into_py(other.py()),
_ => other.py().NotImplemented(),
}
}
fn __hash__(&self) -> u64 {
let mut hasher = DefaultHasher::new();
self.offsets.hash(&mut hasher);
hasher.finish()
}
}
| immediate_offset2 | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.