| file_name (large_string, length 4-69) | prefix (large_string, length 0-26.7k) | suffix (large_string, length 0-24.8k) | middle (large_string, length 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
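Each row splits one Rust source file into a `prefix`, a `suffix`, and a `middle`, with `fim_type` recording how the span was chosen (`random_line_split`, `identifier_name`, `identifier_body`, or `conditional_block`). As a minimal sketch, and not part of the dataset itself, the Python snippet below shows how a row with this schema could be reassembled into the original file or formatted for fill-in-the-middle training; the sentinel strings are illustrative placeholders and the example row is abridged from the first sample below.

```python
# Sketch only: assumes each row is a plain dict with the columns listed above.

def reassemble(row: dict) -> str:
    """Reconstruct the original source file: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict,
                  pre: str = "<fim_prefix>",
                  suf: str = "<fim_suffix>",
                  mid: str = "<fim_middle>") -> str:
    """Format a row in prefix/suffix/middle order; the sentinel tokens are
    placeholders, not values defined by this dataset."""
    return f"{pre}{row['prefix']}{suf}{row['suffix']}{mid}{row['middle']}"

# Abridged example based on the first row below (hidecm.rs, random_line_split):
row = {
    "file_name": "hidecm.rs",
    "prefix": "extern crate usbg;\nuse std::path::PathBuf;\n",
    "middle": "use std::fs;\n",
    "suffix": "\nuse usbg::UsbGadget;\n// ...rest of the file...\n",
    "fim_type": "random_line_split",
}
print(reassemble(row))
```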
hidecm.rs
|
extern crate usbg;
use std::path::PathBuf;
|
use usbg::UsbGadget;
use usbg::UsbGadgetState;
use usbg::UsbGadgetFunction;
use usbg::UsbGadgetConfig;
use usbg::hid;
use usbg::ecm;
fn main() {
// general setup
let mut g1 = UsbGadget::new("g1",
0x1d6b, // Linux Foundation
0x0104, // Multifunction Composite Gadget
usbg::LANGID_EN_US, // LANGID English
"USB Armory",
"Inverse Path",
"d34db33f0123456789");
g1.bcd_device = Some(0x0100); // version 1.0.0
g1.bcd_usb = Some(0x0200); // USB 2.0
// add ECM ethernet
let ecm_function = Box::new(ecm::ECMFunction {
instance_name: "usb0",
dev_addr: "1a:55:89:a2:69:41",
host_addr: "1a:55:89:a2:69:42",
});
g1.functions.push(ecm_function.clone());
// add HID keyboard
let hid_function = Box::new(hid::HIDFunction {
instance_name: "usb0",
protocol: hid::HID_PROTOCOL_KEYBOARD,
subclass: hid::HID_SUBCLASS_BOOT,
report_length: 8,
report_desc: &hid::HID_KEYBOARD_REPORT_DESC,
});
g1.functions.push(hid_function.clone());
// add configuration
let mut c1_functions: Vec<Box<UsbGadgetFunction>> = Vec::new();
c1_functions.push(hid_function.clone());
c1_functions.push(ecm_function.clone());
let c1 = UsbGadgetConfig {
id: 1,
name: "c",
description: "USB Armory ECM + HID",
max_power: Some(120),
functions: c1_functions,
};
g1.configs.push(c1);
let mut usb_state = UsbGadgetState::new();
// usb_state.udc_name("someudc.hg0");
// if you want to test against a temp directory you can uncomment this
// let tmp_configfs = PathBuf::from("/tmp/configfs/usb_gadget");
// let _ = fs::create_dir_all(tmp_configfs.as_path());
// usb_state.configfs_path(tmp_configfs);
match usb_state.enable(g1) {
Ok(_) => println!("Enabled"),
Err(e) => println!("Failed: {}", e),
}
}
|
use std::fs;
|
random_line_split
|
traits-negative-impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The dummy functions are used to avoid adding new cfail files.
// What happens is that the compiler attempts to squash duplicates and some
// errors are not reported. This way, we make sure that, for each function, different
// typeck phases are involved and all errors are reported.
#![feature(optin_builtin_traits)]
use std::marker::Send;
struct Outer<T: Send>(T);
struct Outer2<T>(T);
unsafe impl<T: Send> Sync for Outer2<T> {}
fn is_send<T: Send>(_: T) {}
fn is_sync<T: Sync>(_: T) {}
fn dummy() {
struct TestType;
impl !Send for TestType {}
Outer(TestType);
//~^ ERROR `dummy::TestType` cannot be sent between threads safely
|
struct TestType;
impl !Send for TestType {}
is_send(TestType);
//~^ ERROR `dummy1b::TestType` cannot be sent between threads safely
}
fn dummy1c() {
struct TestType;
impl !Send for TestType {}
is_send((8, TestType));
//~^ ERROR `dummy1c::TestType` cannot be sent between threads safely
}
fn dummy2() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(TestType));
//~^ ERROR `dummy2::TestType` cannot be sent between threads safely
}
fn dummy3() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(Outer2(TestType)));
//~^ ERROR `dummy3::TestType` cannot be sent between threads safely
}
fn main() {
struct TestType;
impl !Send for TestType {}
// This will complain about a missing Send impl because `Sync` is implemented *just*
// for T that are `Send`. Look at #20366 and #19950
is_sync(Outer2(TestType));
//~^ ERROR `main::TestType` cannot be sent between threads safely
}
|
//~| ERROR `dummy::TestType` cannot be sent between threads safely
}
fn dummy1b() {
|
random_line_split
|
traits-negative-impls.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The dummy functions are used to avoid adding new cfail files.
// What happens is that the compiler attempts to squash duplicates and some
// errors are not reported. This way, we make sure that, for each function, different
// typeck phases are involved and all errors are reported.
#![feature(optin_builtin_traits)]
use std::marker::Send;
struct Outer<T: Send>(T);
struct Outer2<T>(T);
unsafe impl<T: Send> Sync for Outer2<T> {}
fn is_send<T: Send>(_: T) {}
fn is_sync<T: Sync>(_: T) {}
fn dummy() {
struct TestType;
impl !Send for TestType {}
Outer(TestType);
//~^ ERROR `dummy::TestType` cannot be sent between threads safely
//~| ERROR `dummy::TestType` cannot be sent between threads safely
}
fn dummy1b() {
struct TestType;
impl !Send for TestType {}
is_send(TestType);
//~^ ERROR `dummy1b::TestType` cannot be sent between threads safely
}
fn dummy1c() {
struct TestType;
impl !Send for TestType {}
is_send((8, TestType));
//~^ ERROR `dummy1c::TestType` cannot be sent between threads safely
}
fn dummy2() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(TestType));
//~^ ERROR `dummy2::TestType` cannot be sent between threads safely
}
fn dummy3() {
struct TestType;
impl !Send for TestType {}
is_send(Box::new(Outer2(TestType)));
//~^ ERROR `dummy3::TestType` cannot be sent between threads safely
}
fn
|
() {
struct TestType;
impl !Send for TestType {}
// This will complain about a missing Send impl because `Sync` is implemented *just*
// for T that are `Send`. Look at #20366 and #19950
is_sync(Outer2(TestType));
//~^ ERROR `main::TestType` cannot be sent between threads safely
}
|
main
|
identifier_name
|
recent_chooser_dialog.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use Bin;
use Buildable;
use Container;
|
use Window;
use ffi;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::mem;
use std::ptr;
glib_wrapper! {
pub struct RecentChooserDialog(Object<ffi::GtkRecentChooserDialog, ffi::GtkRecentChooserDialogClass>): Dialog, Window, Bin, Container, Widget, Buildable, RecentChooser;
match fn {
get_type => || ffi::gtk_recent_chooser_dialog_get_type(),
}
}
impl RecentChooserDialog {
//pub fn new<'a, 'b, 'c, P: Into<Option<&'a str>>, Q: IsA<Window> + 'b, R: Into<Option<&'b Q>>, S: Into<Option<&'c str>>>(title: P, parent: R, first_button_text: S, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> RecentChooserDialog {
// unsafe { TODO: call ffi::gtk_recent_chooser_dialog_new() }
//}
//pub fn new_for_manager<'a, 'b, 'c, P: Into<Option<&'a str>>, Q: IsA<Window> + 'b, R: Into<Option<&'b Q>>, S: Into<Option<&'c str>>>(title: P, parent: R, manager: &RecentManager, first_button_text: S, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> RecentChooserDialog {
// unsafe { TODO: call ffi::gtk_recent_chooser_dialog_new_for_manager() }
//}
}
|
use Dialog;
use RecentChooser;
use Widget;
|
random_line_split
|
race_generated.rs
|
// automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_RACE: i8 = -1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_RACE: i8 = 2;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_RACE: [Race; 4] = [
Race::None,
Race::Human,
Race::Dwarf,
Race::Elf,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Race(pub i8);
#[allow(non_upper_case_globals)]
impl Race {
pub const None: Self = Self(-1);
pub const Human: Self = Self(0);
pub const Dwarf: Self = Self(1);
pub const Elf: Self = Self(2);
pub const ENUM_MIN: i8 = -1;
pub const ENUM_MAX: i8 = 2;
pub const ENUM_VALUES: &'static [Self] = &[
Self::None,
Self::Human,
Self::Dwarf,
Self::Elf,
];
/// Returns the variant's name or "" if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::None => Some("None"),
Self::Human => Some("Human"),
Self::Dwarf => Some("Dwarf"),
Self::Elf => Some("Elf"),
_ => None,
}
}
}
impl std::fmt::Debug for Race {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for Race {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner
|
}
impl flatbuffers::Push for Race {
type Output = Race;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<i8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for Race {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for Race {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for Race {}
|
{
let b = unsafe {
flatbuffers::read_scalar_at::<i8>(buf, loc)
};
Self(b)
}
|
identifier_body
|
race_generated.rs
|
// automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_RACE: i8 = -1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MAX_RACE: i8 = 2;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
pub const ENUM_VALUES_RACE: [Race; 4] = [
Race::None,
Race::Human,
Race::Dwarf,
Race::Elf,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct
|
(pub i8);
#[allow(non_upper_case_globals)]
impl Race {
pub const None: Self = Self(-1);
pub const Human: Self = Self(0);
pub const Dwarf: Self = Self(1);
pub const Elf: Self = Self(2);
pub const ENUM_MIN: i8 = -1;
pub const ENUM_MAX: i8 = 2;
pub const ENUM_VALUES: &'static [Self] = &[
Self::None,
Self::Human,
Self::Dwarf,
Self::Elf,
];
/// Returns the variant's name or "" if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::None => Some("None"),
Self::Human => Some("Human"),
Self::Dwarf => Some("Dwarf"),
Self::Elf => Some("Elf"),
_ => None,
}
}
}
impl std::fmt::Debug for Race {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for Race {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = unsafe {
flatbuffers::read_scalar_at::<i8>(buf, loc)
};
Self(b)
}
}
impl flatbuffers::Push for Race {
type Output = Race;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<i8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for Race {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for Race {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for Race {}
|
Race
|
identifier_name
|
race_generated.rs
|
// automatically generated by the FlatBuffers compiler, do not modify
extern crate flatbuffers;
use std::mem;
use std::cmp::Ordering;
use self::flatbuffers::{EndianScalar, Follow};
use super::*;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
pub const ENUM_MIN_RACE: i8 = -1;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
|
pub const ENUM_VALUES_RACE: [Race; 4] = [
Race::None,
Race::Human,
Race::Dwarf,
Race::Elf,
];
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[repr(transparent)]
pub struct Race(pub i8);
#[allow(non_upper_case_globals)]
impl Race {
pub const None: Self = Self(-1);
pub const Human: Self = Self(0);
pub const Dwarf: Self = Self(1);
pub const Elf: Self = Self(2);
pub const ENUM_MIN: i8 = -1;
pub const ENUM_MAX: i8 = 2;
pub const ENUM_VALUES: &'static [Self] = &[
Self::None,
Self::Human,
Self::Dwarf,
Self::Elf,
];
/// Returns the variant's name or "" if unknown.
pub fn variant_name(self) -> Option<&'static str> {
match self {
Self::None => Some("None"),
Self::Human => Some("Human"),
Self::Dwarf => Some("Dwarf"),
Self::Elf => Some("Elf"),
_ => None,
}
}
}
impl std::fmt::Debug for Race {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(name) = self.variant_name() {
f.write_str(name)
} else {
f.write_fmt(format_args!("<UNKNOWN {:?}>", self.0))
}
}
}
impl<'a> flatbuffers::Follow<'a> for Race {
type Inner = Self;
#[inline]
fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
let b = unsafe {
flatbuffers::read_scalar_at::<i8>(buf, loc)
};
Self(b)
}
}
impl flatbuffers::Push for Race {
type Output = Race;
#[inline]
fn push(&self, dst: &mut [u8], _rest: &[u8]) {
unsafe { flatbuffers::emplace_scalar::<i8>(dst, self.0); }
}
}
impl flatbuffers::EndianScalar for Race {
#[inline]
fn to_little_endian(self) -> Self {
let b = i8::to_le(self.0);
Self(b)
}
#[inline]
#[allow(clippy::wrong_self_convention)]
fn from_little_endian(self) -> Self {
let b = i8::from_le(self.0);
Self(b)
}
}
impl<'a> flatbuffers::Verifiable for Race {
#[inline]
fn run_verifier(
v: &mut flatbuffers::Verifier, pos: usize
) -> Result<(), flatbuffers::InvalidFlatbuffer> {
use self::flatbuffers::Verifiable;
i8::run_verifier(v, pos)
}
}
impl flatbuffers::SimpleToVerifyInSlice for Race {}
|
pub const ENUM_MAX_RACE: i8 = 2;
#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
#[allow(non_camel_case_types)]
|
random_line_split
|
rect.rs
|
use crate::{fragment::Bounds, util, Cell, Point};
use std::fmt;
use ncollide2d::shape::{Segment, Shape};
use sauron::{
html::{attributes, attributes::*},
svg::{attributes::*, *},
Node,
};
use std::cmp::Ordering;
#[derive(Debug, Clone)]
pub struct Rect {
pub start: Point,
pub end: Point,
pub is_filled: bool,
pub radius: Option<f32>,
//TODO:Make this as enum
pub is_broken: bool,
}
impl Rect {
/// creates a new rect, reordering the points and swapping the end points if necessary
/// so that start is the most top-left point and end is the most bottom-right point
pub(in crate) fn new(start: Point, end: Point, is_filled: bool, is_broken: bool) -> Self
|
pub(in crate) fn rounded_new(
start: Point,
end: Point,
is_filled: bool,
radius: f32,
is_broken: bool,
) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: Some(radius),
is_broken,
};
rect.sort_reorder_end_points();
rect
}
/// reorder the end points, swapping them if necessary so that
/// start < end
pub(in crate) fn sort_reorder_end_points(&mut self) {
if self.start > self.end {
let tmp_start = self.start;
self.start = self.end;
self.end = tmp_start;
}
}
/// recompute the rect with start and end point offset by the cell
/// location
pub(in crate) fn absolute_position(&self, cell: Cell) -> Self {
Rect {
start: cell.absolute_position(self.start),
end: cell.absolute_position(self.end),
..*self
}
}
pub(in crate) fn scale(&self, scale: f32) -> Self {
Rect {
start: self.start.scale(scale),
end: self.end.scale(scale),
radius: self.radius.map(|r| r * scale),
..*self
}
}
pub(crate) fn width(&self) -> f32 {
self.end.x - self.start.x
}
pub(crate) fn height(&self) -> f32 {
self.end.y - self.start.y
}
pub(crate) fn is_broken(&self) -> bool {
self.is_broken
}
}
impl Bounds for Rect {
fn bounds(&self) -> (Point, Point) {
let aabb = Segment::new(*self.start, *self.end).local_aabb();
(Point::from(*aabb.mins()), Point::from(*aabb.maxs()))
}
}
impl fmt::Display for Rect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "R {} {}", self.start, self.end)
}
}
impl<MSG> Into<Node<MSG>> for Rect {
fn into(self) -> Node<MSG> {
rect(
vec![
x(self.start.x),
y(self.start.y),
width(self.width()),
height(self.height()),
classes_flag([
("broken", self.is_broken),
("solid",!self.is_broken),
("filled", self.is_filled),
("nofill",!self.is_filled),
]),
if let Some(radius) = self.radius {
rx(radius)
} else {
rx(0)
},
],
vec![],
)
}
}
impl Eq for Rect {}
impl Ord for Rect {
fn cmp(&self, other: &Self) -> Ordering {
self.start
.cmp(&other.start)
.then(self.end.cmp(&other.end))
.then(self.is_filled.cmp(&other.is_filled))
.then(util::opt_ord(self.radius, other.radius))
.then(self.is_broken.cmp(&other.is_broken))
}
}
impl PartialOrd for Rect {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Rect {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
|
{
let mut rect = Rect {
start,
end,
is_filled,
radius: None,
is_broken,
};
rect.sort_reorder_end_points();
rect
}
|
identifier_body
|
rect.rs
|
use crate::{fragment::Bounds, util, Cell, Point};
use std::fmt;
use ncollide2d::shape::{Segment, Shape};
use sauron::{
html::{attributes, attributes::*},
svg::{attributes::*, *},
Node,
};
use std::cmp::Ordering;
#[derive(Debug, Clone)]
pub struct Rect {
pub start: Point,
pub end: Point,
pub is_filled: bool,
pub radius: Option<f32>,
//TODO:Make this as enum
pub is_broken: bool,
}
impl Rect {
/// creates a new rect, reordering the points and swapping the end points if necessary
/// so that start is the most top-left point and end is the most bottom-right point
pub(in crate) fn new(start: Point, end: Point, is_filled: bool, is_broken: bool) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: None,
is_broken,
};
rect.sort_reorder_end_points();
rect
}
pub(in crate) fn rounded_new(
start: Point,
end: Point,
is_filled: bool,
radius: f32,
is_broken: bool,
) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: Some(radius),
is_broken,
};
rect.sort_reorder_end_points();
rect
}
/// reorder the end points, swapping them if necessary so that
/// start < end
pub(in crate) fn sort_reorder_end_points(&mut self) {
if self.start > self.end
|
}
/// recompute the rect with start and end point offset by the cell
/// location
pub(in crate) fn absolute_position(&self, cell: Cell) -> Self {
Rect {
start: cell.absolute_position(self.start),
end: cell.absolute_position(self.end),
..*self
}
}
pub(in crate) fn scale(&self, scale: f32) -> Self {
Rect {
start: self.start.scale(scale),
end: self.end.scale(scale),
radius: self.radius.map(|r| r * scale),
..*self
}
}
pub(crate) fn width(&self) -> f32 {
self.end.x - self.start.x
}
pub(crate) fn height(&self) -> f32 {
self.end.y - self.start.y
}
pub(crate) fn is_broken(&self) -> bool {
self.is_broken
}
}
impl Bounds for Rect {
fn bounds(&self) -> (Point, Point) {
let aabb = Segment::new(*self.start, *self.end).local_aabb();
(Point::from(*aabb.mins()), Point::from(*aabb.maxs()))
}
}
impl fmt::Display for Rect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "R {} {}", self.start, self.end)
}
}
impl<MSG> Into<Node<MSG>> for Rect {
fn into(self) -> Node<MSG> {
rect(
vec![
x(self.start.x),
y(self.start.y),
width(self.width()),
height(self.height()),
classes_flag([
("broken", self.is_broken),
("solid",!self.is_broken),
("filled", self.is_filled),
("nofill",!self.is_filled),
]),
if let Some(radius) = self.radius {
rx(radius)
} else {
rx(0)
},
],
vec![],
)
}
}
impl Eq for Rect {}
impl Ord for Rect {
fn cmp(&self, other: &Self) -> Ordering {
self.start
.cmp(&other.start)
.then(self.end.cmp(&other.end))
.then(self.is_filled.cmp(&other.is_filled))
.then(util::opt_ord(self.radius, other.radius))
.then(self.is_broken.cmp(&other.is_broken))
}
}
impl PartialOrd for Rect {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Rect {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
|
{
let tmp_start = self.start;
self.start = self.end;
self.end = tmp_start;
}
|
conditional_block
|
rect.rs
|
use crate::{fragment::Bounds, util, Cell, Point};
use std::fmt;
use ncollide2d::shape::{Segment, Shape};
use sauron::{
html::{attributes, attributes::*},
svg::{attributes::*, *},
Node,
};
use std::cmp::Ordering;
#[derive(Debug, Clone)]
pub struct Rect {
pub start: Point,
pub end: Point,
pub is_filled: bool,
pub radius: Option<f32>,
//TODO:Make this as enum
pub is_broken: bool,
}
impl Rect {
/// creates a new rect, reordering the points and swapping the end points if necessary
/// so that start is the most top-left point and end is the most bottom-right point
pub(in crate) fn new(start: Point, end: Point, is_filled: bool, is_broken: bool) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: None,
is_broken,
};
rect.sort_reorder_end_points();
rect
}
pub(in crate) fn rounded_new(
start: Point,
end: Point,
is_filled: bool,
radius: f32,
is_broken: bool,
) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: Some(radius),
is_broken,
};
rect.sort_reorder_end_points();
rect
}
/// reorder the end points, swapping them if necessary so that
/// start < end
pub(in crate) fn sort_reorder_end_points(&mut self) {
if self.start > self.end {
let tmp_start = self.start;
self.start = self.end;
self.end = tmp_start;
}
}
/// recompute the rect with start and end point offset by the cell
/// location
|
}
}
pub(in crate) fn scale(&self, scale: f32) -> Self {
Rect {
start: self.start.scale(scale),
end: self.end.scale(scale),
radius: self.radius.map(|r| r * scale),
..*self
}
}
pub(crate) fn width(&self) -> f32 {
self.end.x - self.start.x
}
pub(crate) fn height(&self) -> f32 {
self.end.y - self.start.y
}
pub(crate) fn is_broken(&self) -> bool {
self.is_broken
}
}
impl Bounds for Rect {
fn bounds(&self) -> (Point, Point) {
let aabb = Segment::new(*self.start, *self.end).local_aabb();
(Point::from(*aabb.mins()), Point::from(*aabb.maxs()))
}
}
impl fmt::Display for Rect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "R {} {}", self.start, self.end)
}
}
impl<MSG> Into<Node<MSG>> for Rect {
fn into(self) -> Node<MSG> {
rect(
vec![
x(self.start.x),
y(self.start.y),
width(self.width()),
height(self.height()),
classes_flag([
("broken", self.is_broken),
("solid",!self.is_broken),
("filled", self.is_filled),
("nofill",!self.is_filled),
]),
if let Some(radius) = self.radius {
rx(radius)
} else {
rx(0)
},
],
vec![],
)
}
}
impl Eq for Rect {}
impl Ord for Rect {
fn cmp(&self, other: &Self) -> Ordering {
self.start
.cmp(&other.start)
.then(self.end.cmp(&other.end))
.then(self.is_filled.cmp(&other.is_filled))
.then(util::opt_ord(self.radius, other.radius))
.then(self.is_broken.cmp(&other.is_broken))
}
}
impl PartialOrd for Rect {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Rect {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
|
pub(in crate) fn absolute_position(&self, cell: Cell) -> Self {
Rect {
start: cell.absolute_position(self.start),
end: cell.absolute_position(self.end),
..*self
|
random_line_split
|
rect.rs
|
use crate::{fragment::Bounds, util, Cell, Point};
use std::fmt;
use ncollide2d::shape::{Segment, Shape};
use sauron::{
html::{attributes, attributes::*},
svg::{attributes::*, *},
Node,
};
use std::cmp::Ordering;
#[derive(Debug, Clone)]
pub struct Rect {
pub start: Point,
pub end: Point,
pub is_filled: bool,
pub radius: Option<f32>,
//TODO:Make this as enum
pub is_broken: bool,
}
impl Rect {
/// creates a new rect, reordering the points and swapping the end points if necessary
/// so that start is the most top-left point and end is the most bottom-right point
pub(in crate) fn
|
(start: Point, end: Point, is_filled: bool, is_broken: bool) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: None,
is_broken,
};
rect.sort_reorder_end_points();
rect
}
pub(in crate) fn rounded_new(
start: Point,
end: Point,
is_filled: bool,
radius: f32,
is_broken: bool,
) -> Self {
let mut rect = Rect {
start,
end,
is_filled,
radius: Some(radius),
is_broken,
};
rect.sort_reorder_end_points();
rect
}
/// reorder the end points, swapping them if necessary so that
/// start < end
pub(in crate) fn sort_reorder_end_points(&mut self) {
if self.start > self.end {
let tmp_start = self.start;
self.start = self.end;
self.end = tmp_start;
}
}
/// recompute the rect with start and end point offset by the cell
/// location
pub(in crate) fn absolute_position(&self, cell: Cell) -> Self {
Rect {
start: cell.absolute_position(self.start),
end: cell.absolute_position(self.end),
..*self
}
}
pub(in crate) fn scale(&self, scale: f32) -> Self {
Rect {
start: self.start.scale(scale),
end: self.end.scale(scale),
radius: self.radius.map(|r| r * scale),
..*self
}
}
pub(crate) fn width(&self) -> f32 {
self.end.x - self.start.x
}
pub(crate) fn height(&self) -> f32 {
self.end.y - self.start.y
}
pub(crate) fn is_broken(&self) -> bool {
self.is_broken
}
}
impl Bounds for Rect {
fn bounds(&self) -> (Point, Point) {
let aabb = Segment::new(*self.start, *self.end).local_aabb();
(Point::from(*aabb.mins()), Point::from(*aabb.maxs()))
}
}
impl fmt::Display for Rect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "R {} {}", self.start, self.end)
}
}
impl<MSG> Into<Node<MSG>> for Rect {
fn into(self) -> Node<MSG> {
rect(
vec![
x(self.start.x),
y(self.start.y),
width(self.width()),
height(self.height()),
classes_flag([
("broken", self.is_broken),
("solid",!self.is_broken),
("filled", self.is_filled),
("nofill",!self.is_filled),
]),
if let Some(radius) = self.radius {
rx(radius)
} else {
rx(0)
},
],
vec![],
)
}
}
impl Eq for Rect {}
impl Ord for Rect {
fn cmp(&self, other: &Self) -> Ordering {
self.start
.cmp(&other.start)
.then(self.end.cmp(&other.end))
.then(self.is_filled.cmp(&other.is_filled))
.then(util::opt_ord(self.radius, other.radius))
.then(self.is_broken.cmp(&other.is_broken))
}
}
impl PartialOrd for Rect {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Rect {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
|
new
|
identifier_name
|
add_helpers.rs
|
use proc_macro2::TokenStream;
use quote::quote;
use syn::{Field, Ident, Index};
pub fn tuple_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
let mut exprs = vec![];
for i in 0..fields.len() {
let i = Index::from(i);
// generates `self.0.add(rhs.0)`
let expr = quote!(self.#i.#method_ident(rhs.#i));
exprs.push(expr);
}
exprs
}
pub fn struct_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
let mut exprs = vec![];
for field in fields {
// It's safe to unwrap because struct fields always have an identifier
let field_id = field.ident.as_ref().unwrap();
// generates `x: self.x.add(rhs.x)`
let expr = quote!(self.#field_id.#method_ident(rhs.#field_id));
|
exprs
}
|
exprs.push(expr)
}
|
random_line_split
|
add_helpers.rs
|
use proc_macro2::TokenStream;
use quote::quote;
use syn::{Field, Ident, Index};
pub fn tuple_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
let mut exprs = vec![];
for i in 0..fields.len() {
let i = Index::from(i);
// generates `self.0.add(rhs.0)`
let expr = quote!(self.#i.#method_ident(rhs.#i));
exprs.push(expr);
}
exprs
}
pub fn struct_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream>
|
{
let mut exprs = vec![];
for field in fields {
// It's safe to unwrap because struct fields always have an identifier
let field_id = field.ident.as_ref().unwrap();
// generates `x: self.x.add(rhs.x)`
let expr = quote!(self.#field_id.#method_ident(rhs.#field_id));
exprs.push(expr)
}
exprs
}
|
identifier_body
|
|
add_helpers.rs
|
use proc_macro2::TokenStream;
use quote::quote;
use syn::{Field, Ident, Index};
pub fn tuple_exprs(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
let mut exprs = vec![];
for i in 0..fields.len() {
let i = Index::from(i);
// generates `self.0.add(rhs.0)`
let expr = quote!(self.#i.#method_ident(rhs.#i));
exprs.push(expr);
}
exprs
}
pub fn
|
(fields: &[&Field], method_ident: &Ident) -> Vec<TokenStream> {
let mut exprs = vec![];
for field in fields {
// It's safe to unwrap because struct fields always have an identifier
let field_id = field.ident.as_ref().unwrap();
// generates `x: self.x.add(rhs.x)`
let expr = quote!(self.#field_id.#method_ident(rhs.#field_id));
exprs.push(expr)
}
exprs
}
|
struct_exprs
|
identifier_name
|
mod.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
|
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
pub mod editor;
pub mod plain;
pub mod stdout;
|
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
|
random_line_split
|
mutex.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use sys::sync as ffi;
use mem;
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[inline]
pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
m.inner.get()
}
// So you might be asking why we're using SRWLock instead of CriticalSection?
//
// 1. SRWLock is several times faster than CriticalSection according to
// benchmarks performed on both Windows 8 and Windows 7.
//
// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix
// implementation deadlocks so consistency is preferred. See #19962 for more
// details.
//
// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
// is that there are no guarantees of fairness.
impl Mutex {
#[inline]
pub unsafe fn lock(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
}
#[inline]
pub unsafe fn unlock(&self) {
ffi::ReleaseSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn destroy(&self) {
//...
}
}
pub struct ReentrantMutex { inner: Box<UnsafeCell<ffi::CRITICAL_SECTION>> }
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub unsafe fn new() -> ReentrantMutex {
let mutex = ReentrantMutex { inner: box mem::uninitialized() };
ffi::InitializeCriticalSection(mutex.inner.get());
mutex
}
|
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::TryEnterCriticalSection(self.inner.get()) != 0
}
pub unsafe fn unlock(&self) {
ffi::LeaveCriticalSection(self.inner.get());
}
pub unsafe fn destroy(&self) {
ffi::DeleteCriticalSection(self.inner.get());
}
}
|
pub unsafe fn lock(&self) {
ffi::EnterCriticalSection(self.inner.get());
|
random_line_split
|
mutex.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use sys::sync as ffi;
use mem;
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[inline]
pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
m.inner.get()
}
// So you might be asking why we're using SRWLock instead of CriticalSection?
//
// 1. SRWLock is several times faster than CriticalSection according to
// benchmarks performed on both Windows 8 and Windows 7.
//
// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix
// implementation deadlocks so consistency is preferred. See #19962 for more
// details.
//
// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
// is that there are no guarantees of fairness.
impl Mutex {
#[inline]
pub unsafe fn
|
(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
}
#[inline]
pub unsafe fn unlock(&self) {
ffi::ReleaseSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn destroy(&self) {
//...
}
}
pub struct ReentrantMutex { inner: Box<UnsafeCell<ffi::CRITICAL_SECTION>> }
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub unsafe fn new() -> ReentrantMutex {
let mutex = ReentrantMutex { inner: box mem::uninitialized() };
ffi::InitializeCriticalSection(mutex.inner.get());
mutex
}
pub unsafe fn lock(&self) {
ffi::EnterCriticalSection(self.inner.get());
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::TryEnterCriticalSection(self.inner.get()) != 0
}
pub unsafe fn unlock(&self) {
ffi::LeaveCriticalSection(self.inner.get());
}
pub unsafe fn destroy(&self) {
ffi::DeleteCriticalSection(self.inner.get());
}
}
|
lock
|
identifier_name
|
mutex.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use sys::sync as ffi;
use mem;
pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
pub const MUTEX_INIT: Mutex = Mutex {
inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
};
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
#[inline]
pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
m.inner.get()
}
// So you might be asking why we're using SRWLock instead of CriticalSection?
//
// 1. SRWLock is several times faster than CriticalSection according to
// benchmarks performed on both Windows 8 and Windows 7.
//
// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix
// implementation deadlocks so consistency is preferred. See #19962 for more
// details.
//
// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
// is that there are no guarantees of fairness.
impl Mutex {
#[inline]
pub unsafe fn lock(&self) {
ffi::AcquireSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn try_lock(&self) -> bool
|
#[inline]
pub unsafe fn unlock(&self) {
ffi::ReleaseSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn destroy(&self) {
//...
}
}
pub struct ReentrantMutex { inner: Box<UnsafeCell<ffi::CRITICAL_SECTION>> }
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
pub unsafe fn new() -> ReentrantMutex {
let mutex = ReentrantMutex { inner: box mem::uninitialized() };
ffi::InitializeCriticalSection(mutex.inner.get());
mutex
}
pub unsafe fn lock(&self) {
ffi::EnterCriticalSection(self.inner.get());
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
ffi::TryEnterCriticalSection(self.inner.get()) != 0
}
pub unsafe fn unlock(&self) {
ffi::LeaveCriticalSection(self.inner.get());
}
pub unsafe fn destroy(&self) {
ffi::DeleteCriticalSection(self.inner.get());
}
}
|
{
ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
}
|
identifier_body
|
single_glyph.rs
|
extern crate freetype as ft;
extern crate unicode_normalization;
use unicode_normalization::UnicodeNormalization;
const WIDTH: usize = 32;
const HEIGHT: usize = 24;
fn
|
(bitmap: ft::Bitmap, x: usize, y: usize) -> [[u8; WIDTH]; HEIGHT] {
let mut figure = [[0; WIDTH]; HEIGHT];
let mut p = 0;
let mut q = 0;
let w = bitmap.width() as usize;
let x_max = x + w;
let y_max = y + bitmap.rows() as usize;
for i in x .. x_max {
for j in y .. y_max {
if i < WIDTH && j < HEIGHT {
figure[j][i] |= bitmap.buffer()[q * w + p];
q += 1;
}
}
q = 0;
p += 1;
}
figure
}
fn main() {
let ref mut args = std::env::args();
if args.len() != 3 {
let exe = args.next().unwrap();
println!("Usage: {} font character", exe);
return
}
let ref font = args.nth(1).unwrap();
let character = args.next().and_then(|s| s.nfc().next()).unwrap() as usize;
let library = ft::Library::init().unwrap();
let face = library.new_face(font, 0).unwrap();
face.set_char_size(40 * 64, 0, 50, 0).unwrap();
face.load_char(character, ft::face::RENDER).unwrap();
let glyph = face.glyph();
let x = glyph.bitmap_left() as usize;
let y = HEIGHT - glyph.bitmap_top() as usize;
let figure = draw_bitmap(glyph.bitmap(), x, y);
for i in 0 .. HEIGHT {
for j in 0 .. WIDTH {
print!("{}",
match figure[i][j] {
p if p == 0 => " ",
p if p < 128 => "*",
_ => "+"
}
);
}
println!("");
}
}
|
draw_bitmap
|
identifier_name
|
single_glyph.rs
|
extern crate freetype as ft;
extern crate unicode_normalization;
use unicode_normalization::UnicodeNormalization;
const WIDTH: usize = 32;
const HEIGHT: usize = 24;
fn draw_bitmap(bitmap: ft::Bitmap, x: usize, y: usize) -> [[u8; WIDTH]; HEIGHT]
|
fn main() {
let ref mut args = std::env::args();
if args.len() != 3 {
let exe = args.next().unwrap();
println!("Usage: {} font character", exe);
return
}
let ref font = args.nth(1).unwrap();
let character = args.next().and_then(|s| s.nfc().next()).unwrap() as usize;
let library = ft::Library::init().unwrap();
let face = library.new_face(font, 0).unwrap();
face.set_char_size(40 * 64, 0, 50, 0).unwrap();
face.load_char(character, ft::face::RENDER).unwrap();
let glyph = face.glyph();
let x = glyph.bitmap_left() as usize;
let y = HEIGHT - glyph.bitmap_top() as usize;
let figure = draw_bitmap(glyph.bitmap(), x, y);
for i in 0 .. HEIGHT {
for j in 0 .. WIDTH {
print!("{}",
match figure[i][j] {
p if p == 0 => " ",
p if p < 128 => "*",
_ => "+"
}
);
}
println!("");
}
}
|
{
let mut figure = [[0; WIDTH]; HEIGHT];
let mut p = 0;
let mut q = 0;
let w = bitmap.width() as usize;
let x_max = x + w;
let y_max = y + bitmap.rows() as usize;
for i in x .. x_max {
for j in y .. y_max {
if i < WIDTH && j < HEIGHT {
figure[j][i] |= bitmap.buffer()[q * w + p];
q += 1;
}
}
q = 0;
p += 1;
}
figure
}
|
identifier_body
|
single_glyph.rs
|
extern crate freetype as ft;
extern crate unicode_normalization;
use unicode_normalization::UnicodeNormalization;
const WIDTH: usize = 32;
const HEIGHT: usize = 24;
fn draw_bitmap(bitmap: ft::Bitmap, x: usize, y: usize) -> [[u8; WIDTH]; HEIGHT] {
let mut figure = [[0; WIDTH]; HEIGHT];
let mut p = 0;
let mut q = 0;
let w = bitmap.width() as usize;
let x_max = x + w;
let y_max = y + bitmap.rows() as usize;
for i in x .. x_max {
for j in y .. y_max {
if i < WIDTH && j < HEIGHT {
figure[j][i] |= bitmap.buffer()[q * w + p];
q += 1;
}
}
q = 0;
p += 1;
}
figure
}
fn main() {
let ref mut args = std::env::args();
if args.len() != 3 {
let exe = args.next().unwrap();
println!("Usage: {} font character", exe);
return
}
let ref font = args.nth(1).unwrap();
let character = args.next().and_then(|s| s.nfc().next()).unwrap() as usize;
let library = ft::Library::init().unwrap();
let face = library.new_face(font, 0).unwrap();
face.set_char_size(40 * 64, 0, 50, 0).unwrap();
face.load_char(character, ft::face::RENDER).unwrap();
|
for i in 0 .. HEIGHT {
for j in 0 .. WIDTH {
print!("{}",
match figure[i][j] {
p if p == 0 => " ",
p if p < 128 => "*",
_ => "+"
}
);
}
println!("");
}
}
|
let glyph = face.glyph();
let x = glyph.bitmap_left() as usize;
let y = HEIGHT - glyph.bitmap_top() as usize;
let figure = draw_bitmap(glyph.bitmap(), x, y);
|
random_line_split
|
single_glyph.rs
|
extern crate freetype as ft;
extern crate unicode_normalization;
use unicode_normalization::UnicodeNormalization;
const WIDTH: usize = 32;
const HEIGHT: usize = 24;
fn draw_bitmap(bitmap: ft::Bitmap, x: usize, y: usize) -> [[u8; WIDTH]; HEIGHT] {
let mut figure = [[0; WIDTH]; HEIGHT];
let mut p = 0;
let mut q = 0;
let w = bitmap.width() as usize;
let x_max = x + w;
let y_max = y + bitmap.rows() as usize;
for i in x .. x_max {
for j in y .. y_max {
if i < WIDTH && j < HEIGHT
|
}
q = 0;
p += 1;
}
figure
}
fn main() {
let ref mut args = std::env::args();
if args.len() != 3 {
let exe = args.next().unwrap();
println!("Usage: {} font character", exe);
return
}
let ref font = args.nth(1).unwrap();
let character = args.next().and_then(|s| s.nfc().next()).unwrap() as usize;
let library = ft::Library::init().unwrap();
let face = library.new_face(font, 0).unwrap();
face.set_char_size(40 * 64, 0, 50, 0).unwrap();
face.load_char(character, ft::face::RENDER).unwrap();
let glyph = face.glyph();
let x = glyph.bitmap_left() as usize;
let y = HEIGHT - glyph.bitmap_top() as usize;
let figure = draw_bitmap(glyph.bitmap(), x, y);
for i in 0 .. HEIGHT {
for j in 0 .. WIDTH {
print!("{}",
match figure[i][j] {
p if p == 0 => " ",
p if p < 128 => "*",
_ => "+"
}
);
}
println!("");
}
}
|
{
figure[j][i] |= bitmap.buffer()[q * w + p];
q += 1;
}
|
conditional_block
|
process_builder.rs
|
use std::collections::HashMap;
use std::ffi::CString;
use std::fmt::{self, Formatter};
use std::io::process::{Command, ProcessOutput, InheritFd};
use std::os;
use std::path::BytesContainer;
use util::{CargoResult, ProcessError, process_error};
#[derive(Clone, PartialEq, Show)]
pub struct ProcessBuilder {
program: CString,
args: Vec<CString>,
env: HashMap<String, Option<CString>>,
cwd: Path,
}
impl fmt::String for ProcessBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "`{}", String::from_utf8_lossy(self.program.as_bytes())));
for arg in self.args.iter() {
try!(write!(f, " {}", String::from_utf8_lossy(arg.as_bytes())));
}
write!(f, "`")
}
}
impl ProcessBuilder {
pub fn arg<T: BytesContainer>(mut self, arg: T) -> ProcessBuilder {
self.args.push(CString::from_slice(arg.container_as_bytes()));
self
}
pub fn
|
<T: BytesContainer>(mut self, arguments: &[T]) -> ProcessBuilder {
self.args.extend(arguments.iter().map(|t| {
CString::from_slice(t.container_as_bytes())
}));
self
}
pub fn get_args(&self) -> &[CString] {
self.args.as_slice()
}
pub fn cwd(mut self, path: Path) -> ProcessBuilder {
self.cwd = path;
self
}
pub fn env<T: BytesContainer>(mut self, key: &str,
val: Option<T>) -> ProcessBuilder {
let val = val.map(|t| CString::from_slice(t.container_as_bytes()));
self.env.insert(key.to_string(), val);
self
}
// TODO: should InheritFd be hardcoded?
pub fn exec(&self) -> Result<(), ProcessError> {
let mut command = self.build_command();
command.stdout(InheritFd(1))
.stderr(InheritFd(2))
.stdin(InheritFd(0));
let exit = try!(command.status().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if exit.success() {
Ok(())
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&exit), None))
}
}
pub fn exec_with_output(&self) -> Result<ProcessOutput, ProcessError> {
let command = self.build_command();
let output = try!(command.output().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if output.status.success() {
Ok(output)
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&output.status), Some(&output)))
}
}
pub fn build_command(&self) -> Command {
let mut command = Command::new(&self.program);
command.cwd(&self.cwd);
for arg in self.args.iter() {
command.arg(arg);
}
for (k, v) in self.env.iter() {
let k = k.as_slice();
match *v {
Some(ref v) => { command.env(k, v); }
None => { command.env_remove(k); }
}
}
command
}
fn debug_string(&self) -> String {
let mut program = format!("{}", String::from_utf8_lossy(self.program.as_bytes()));
for arg in self.args.iter() {
program.push(' ');
program.push_str(&format!("{}", String::from_utf8_lossy(arg.as_bytes()))[]);
}
program
}
}
pub fn process<T: BytesContainer>(cmd: T) -> CargoResult<ProcessBuilder> {
Ok(ProcessBuilder {
program: CString::from_slice(cmd.container_as_bytes()),
args: Vec::new(),
cwd: try!(os::getcwd()),
env: HashMap::new(),
})
}
|
args
|
identifier_name
|
process_builder.rs
|
use std::collections::HashMap;
use std::ffi::CString;
use std::fmt::{self, Formatter};
use std::io::process::{Command, ProcessOutput, InheritFd};
use std::os;
use std::path::BytesContainer;
use util::{CargoResult, ProcessError, process_error};
#[derive(Clone, PartialEq, Show)]
pub struct ProcessBuilder {
program: CString,
args: Vec<CString>,
env: HashMap<String, Option<CString>>,
cwd: Path,
}
impl fmt::String for ProcessBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "`{}", String::from_utf8_lossy(self.program.as_bytes())));
for arg in self.args.iter() {
try!(write!(f, " {}", String::from_utf8_lossy(arg.as_bytes())));
}
write!(f, "`")
}
}
impl ProcessBuilder {
pub fn arg<T: BytesContainer>(mut self, arg: T) -> ProcessBuilder
|
pub fn args<T: BytesContainer>(mut self, arguments: &[T]) -> ProcessBuilder {
self.args.extend(arguments.iter().map(|t| {
CString::from_slice(t.container_as_bytes())
}));
self
}
pub fn get_args(&self) -> &[CString] {
self.args.as_slice()
}
pub fn cwd(mut self, path: Path) -> ProcessBuilder {
self.cwd = path;
self
}
pub fn env<T: BytesContainer>(mut self, key: &str,
val: Option<T>) -> ProcessBuilder {
let val = val.map(|t| CString::from_slice(t.container_as_bytes()));
self.env.insert(key.to_string(), val);
self
}
// TODO: should InheritFd be hardcoded?
pub fn exec(&self) -> Result<(), ProcessError> {
let mut command = self.build_command();
command.stdout(InheritFd(1))
.stderr(InheritFd(2))
.stdin(InheritFd(0));
let exit = try!(command.status().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if exit.success() {
Ok(())
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&exit), None))
}
}
pub fn exec_with_output(&self) -> Result<ProcessOutput, ProcessError> {
let command = self.build_command();
let output = try!(command.output().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if output.status.success() {
Ok(output)
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&output.status), Some(&output)))
}
}
pub fn build_command(&self) -> Command {
let mut command = Command::new(&self.program);
command.cwd(&self.cwd);
for arg in self.args.iter() {
command.arg(arg);
}
for (k, v) in self.env.iter() {
let k = k.as_slice();
match *v {
Some(ref v) => { command.env(k, v); }
None => { command.env_remove(k); }
}
}
command
}
fn debug_string(&self) -> String {
let mut program = format!("{}", String::from_utf8_lossy(self.program.as_bytes()));
for arg in self.args.iter() {
program.push(' ');
program.push_str(&format!("{}", String::from_utf8_lossy(arg.as_bytes()))[]);
}
program
}
}
pub fn process<T: BytesContainer>(cmd: T) -> CargoResult<ProcessBuilder> {
Ok(ProcessBuilder {
program: CString::from_slice(cmd.container_as_bytes()),
args: Vec::new(),
cwd: try!(os::getcwd()),
env: HashMap::new(),
})
}
|
{
self.args.push(CString::from_slice(arg.container_as_bytes()));
self
}
|
identifier_body
|
process_builder.rs
|
use std::collections::HashMap;
use std::ffi::CString;
use std::fmt::{self, Formatter};
use std::io::process::{Command, ProcessOutput, InheritFd};
use std::os;
use std::path::BytesContainer;
use util::{CargoResult, ProcessError, process_error};
#[derive(Clone, PartialEq, Show)]
pub struct ProcessBuilder {
program: CString,
args: Vec<CString>,
env: HashMap<String, Option<CString>>,
cwd: Path,
}
impl fmt::String for ProcessBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "`{}", String::from_utf8_lossy(self.program.as_bytes())));
for arg in self.args.iter() {
try!(write!(f, " {}", String::from_utf8_lossy(arg.as_bytes())));
}
write!(f, "`")
}
}
impl ProcessBuilder {
pub fn arg<T: BytesContainer>(mut self, arg: T) -> ProcessBuilder {
self.args.push(CString::from_slice(arg.container_as_bytes()));
self
}
pub fn args<T: BytesContainer>(mut self, arguments: &[T]) -> ProcessBuilder {
self.args.extend(arguments.iter().map(|t| {
CString::from_slice(t.container_as_bytes())
}));
self
}
pub fn get_args(&self) -> &[CString] {
self.args.as_slice()
}
pub fn cwd(mut self, path: Path) -> ProcessBuilder {
self.cwd = path;
self
}
pub fn env<T: BytesContainer>(mut self, key: &str,
val: Option<T>) -> ProcessBuilder {
let val = val.map(|t| CString::from_slice(t.container_as_bytes()));
self.env.insert(key.to_string(), val);
self
}
// TODO: should InheritFd be hardcoded?
pub fn exec(&self) -> Result<(), ProcessError> {
let mut command = self.build_command();
command.stdout(InheritFd(1))
.stderr(InheritFd(2))
.stdin(InheritFd(0));
let exit = try!(command.status().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if exit.success() {
Ok(())
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&exit), None))
}
}
pub fn exec_with_output(&self) -> Result<ProcessOutput, ProcessError> {
let command = self.build_command();
let output = try!(command.output().map_err(|e| {
process_error(format!("Could not execute process `{}`",
self.debug_string()),
Some(e), None, None)
}));
if output.status.success() {
Ok(output)
} else {
Err(process_error(format!("Process didn't exit successfully: `{}`",
self.debug_string()),
None, Some(&output.status), Some(&output)))
}
}
pub fn build_command(&self) -> Command {
let mut command = Command::new(&self.program);
command.cwd(&self.cwd);
for arg in self.args.iter() {
command.arg(arg);
}
for (k, v) in self.env.iter() {
let k = k.as_slice();
match *v {
Some(ref v) => { command.env(k, v); }
None => { command.env_remove(k); }
}
}
command
}
fn debug_string(&self) -> String {
let mut program = format!("{}", String::from_utf8_lossy(self.program.as_bytes()));
for arg in self.args.iter() {
|
}
program
}
}
pub fn process<T: BytesContainer>(cmd: T) -> CargoResult<ProcessBuilder> {
Ok(ProcessBuilder {
program: CString::from_slice(cmd.container_as_bytes()),
args: Vec::new(),
cwd: try!(os::getcwd()),
env: HashMap::new(),
})
}
|
program.push(' ');
program.push_str(&format!("{}", String::from_utf8_lossy(arg.as_bytes()))[]);
|
random_line_split
|
inline.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, impl_self, no_self};
use middle::trans::base::{trans_item, get_item_val, trans_fn};
use middle::trans::common::*;
use middle::ty;
use util::ppaux::ty_to_str;
use std::vec;
use syntax::ast;
use syntax::ast_map::path_name;
use syntax::ast_util::local_def;
pub fn
|
(ccx: @mut CrateContext, fn_id: ast::def_id)
-> ast::def_id {
let _icx = push_ctxt("maybe_instantiate_inline");
match ccx.external.find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline(%s): already inline as node id %d",
ty::item_path_str(ccx.tcx, fn_id), node_id);
return local_def(node_id);
}
Some(&None) => {
return fn_id; // Not inlinable
}
None => {
// Not seen yet
}
}
let csearch_result =
csearch::maybe_get_item_ast(
ccx.tcx, fn_id,
|a,b,c,d| {
astencode::decode_inlined_item(a, b, ccx.maps,
/*bad*/ copy c, d)
});
return match csearch_result {
csearch::not_found => {
ccx.external.insert(fn_id, None);
fn_id
}
csearch::found(ast::ii_item(item)) => {
ccx.external.insert(fn_id, Some(item.id));
ccx.stats.n_inlines += 1;
trans_item(ccx, item);
local_def(item.id)
}
csearch::found(ast::ii_foreign(item)) => {
ccx.external.insert(fn_id, Some(item.id));
local_def(item.id)
}
csearch::found_parent(parent_id, ast::ii_item(item)) => {
ccx.external.insert(parent_id, Some(item.id));
let mut my_id = 0;
match item.node {
ast::item_enum(_, _) => {
let vs_here = ty::enum_variants(ccx.tcx, local_def(item.id));
let vs_there = ty::enum_variants(ccx.tcx, parent_id);
for vs_here.iter().zip(vs_there.iter()).advance |(here, there)| {
if there.id == fn_id { my_id = here.id.node; }
ccx.external.insert(there.id, Some(here.id.node));
}
}
_ => ccx.sess.bug("maybe_instantiate_inline: item has a \
non-enum parent")
}
trans_item(ccx, item);
local_def(my_id)
}
csearch::found_parent(_, _) => {
ccx.sess.bug("maybe_get_item_ast returned a found_parent \
with a non-item parent");
}
csearch::found(ast::ii_method(impl_did, is_provided, mth)) => {
ccx.stats.n_inlines += 1;
ccx.external.insert(fn_id, Some(mth.id));
// If this is a default method, we can't look up the
// impl type. But we aren't going to translate anyways, so don't.
if is_provided { return local_def(mth.id); }
let impl_tpt = ty::lookup_item_type(ccx.tcx, impl_did);
let num_type_params =
impl_tpt.generics.type_param_defs.len() +
mth.generics.ty_params.len();
if num_type_params == 0 {
let llfn = get_item_val(ccx, mth.id);
let path = vec::append(
ty::item_path(ccx.tcx, impl_did),
[path_name(mth.ident)]);
let self_kind = match mth.explicit_self.node {
ast::sty_static => no_self,
_ => {
let self_ty = ty::node_id_to_type(ccx.tcx,
mth.self_id);
debug!("calling inline trans_fn with self_ty %s",
ty_to_str(ccx.tcx, self_ty));
match mth.explicit_self.node {
ast::sty_value => impl_self(self_ty, ty::ByRef),
_ => impl_self(self_ty, ty::ByCopy),
}
}
};
trans_fn(ccx,
path,
&mth.decl,
&mth.body,
llfn,
self_kind,
None,
mth.id,
[]);
}
local_def(mth.id)
}
};
}
|
maybe_instantiate_inline
|
identifier_name
|
inline.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, impl_self, no_self};
use middle::trans::base::{trans_item, get_item_val, trans_fn};
use middle::trans::common::*;
use middle::ty;
use util::ppaux::ty_to_str;
use std::vec;
use syntax::ast;
use syntax::ast_map::path_name;
use syntax::ast_util::local_def;
pub fn maybe_instantiate_inline(ccx: @mut CrateContext, fn_id: ast::def_id)
-> ast::def_id {
let _icx = push_ctxt("maybe_instantiate_inline");
match ccx.external.find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline(%s): already inline as node id %d",
ty::item_path_str(ccx.tcx, fn_id), node_id);
return local_def(node_id);
}
Some(&None) => {
return fn_id; // Not inlinable
}
None => {
// Not seen yet
}
}
let csearch_result =
csearch::maybe_get_item_ast(
ccx.tcx, fn_id,
|a,b,c,d| {
astencode::decode_inlined_item(a, b, ccx.maps,
/*bad*/ copy c, d)
});
return match csearch_result {
csearch::not_found => {
ccx.external.insert(fn_id, None);
fn_id
}
csearch::found(ast::ii_item(item)) => {
ccx.external.insert(fn_id, Some(item.id));
ccx.stats.n_inlines += 1;
trans_item(ccx, item);
local_def(item.id)
}
csearch::found(ast::ii_foreign(item)) => {
ccx.external.insert(fn_id, Some(item.id));
local_def(item.id)
}
csearch::found_parent(parent_id, ast::ii_item(item)) => {
ccx.external.insert(parent_id, Some(item.id));
let mut my_id = 0;
match item.node {
ast::item_enum(_, _) => {
let vs_here = ty::enum_variants(ccx.tcx, local_def(item.id));
let vs_there = ty::enum_variants(ccx.tcx, parent_id);
for vs_here.iter().zip(vs_there.iter()).advance |(here, there)| {
if there.id == fn_id { my_id = here.id.node; }
ccx.external.insert(there.id, Some(here.id.node));
}
}
_ => ccx.sess.bug("maybe_instantiate_inline: item has a \
|
trans_item(ccx, item);
local_def(my_id)
}
csearch::found_parent(_, _) => {
ccx.sess.bug("maybe_get_item_ast returned a found_parent \
with a non-item parent");
}
csearch::found(ast::ii_method(impl_did, is_provided, mth)) => {
ccx.stats.n_inlines += 1;
ccx.external.insert(fn_id, Some(mth.id));
// If this is a default method, we can't look up the
            // impl type. But we aren't going to translate anyway, so don't.
if is_provided { return local_def(mth.id); }
let impl_tpt = ty::lookup_item_type(ccx.tcx, impl_did);
let num_type_params =
impl_tpt.generics.type_param_defs.len() +
mth.generics.ty_params.len();
if num_type_params == 0 {
let llfn = get_item_val(ccx, mth.id);
let path = vec::append(
ty::item_path(ccx.tcx, impl_did),
[path_name(mth.ident)]);
let self_kind = match mth.explicit_self.node {
ast::sty_static => no_self,
_ => {
let self_ty = ty::node_id_to_type(ccx.tcx,
mth.self_id);
debug!("calling inline trans_fn with self_ty %s",
ty_to_str(ccx.tcx, self_ty));
match mth.explicit_self.node {
ast::sty_value => impl_self(self_ty, ty::ByRef),
_ => impl_self(self_ty, ty::ByCopy),
}
}
};
trans_fn(ccx,
path,
&mth.decl,
&mth.body,
llfn,
self_kind,
None,
mth.id,
[]);
}
local_def(mth.id)
}
};
}
|
non-enum parent")
}
|
random_line_split
|
inline.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, impl_self, no_self};
use middle::trans::base::{trans_item, get_item_val, trans_fn};
use middle::trans::common::*;
use middle::ty;
use util::ppaux::ty_to_str;
use std::vec;
use syntax::ast;
use syntax::ast_map::path_name;
use syntax::ast_util::local_def;
pub fn maybe_instantiate_inline(ccx: @mut CrateContext, fn_id: ast::def_id)
-> ast::def_id
|
|a,b,c,d| {
astencode::decode_inlined_item(a, b, ccx.maps,
/*bad*/ copy c, d)
});
return match csearch_result {
csearch::not_found => {
ccx.external.insert(fn_id, None);
fn_id
}
csearch::found(ast::ii_item(item)) => {
ccx.external.insert(fn_id, Some(item.id));
ccx.stats.n_inlines += 1;
trans_item(ccx, item);
local_def(item.id)
}
csearch::found(ast::ii_foreign(item)) => {
ccx.external.insert(fn_id, Some(item.id));
local_def(item.id)
}
csearch::found_parent(parent_id, ast::ii_item(item)) => {
ccx.external.insert(parent_id, Some(item.id));
let mut my_id = 0;
match item.node {
ast::item_enum(_, _) => {
let vs_here = ty::enum_variants(ccx.tcx, local_def(item.id));
let vs_there = ty::enum_variants(ccx.tcx, parent_id);
for vs_here.iter().zip(vs_there.iter()).advance |(here, there)| {
if there.id == fn_id { my_id = here.id.node; }
ccx.external.insert(there.id, Some(here.id.node));
}
}
_ => ccx.sess.bug("maybe_instantiate_inline: item has a \
non-enum parent")
}
trans_item(ccx, item);
local_def(my_id)
}
csearch::found_parent(_, _) => {
ccx.sess.bug("maybe_get_item_ast returned a found_parent \
with a non-item parent");
}
csearch::found(ast::ii_method(impl_did, is_provided, mth)) => {
ccx.stats.n_inlines += 1;
ccx.external.insert(fn_id, Some(mth.id));
// If this is a default method, we can't look up the
            // impl type. But we aren't going to translate anyway, so don't.
if is_provided { return local_def(mth.id); }
let impl_tpt = ty::lookup_item_type(ccx.tcx, impl_did);
let num_type_params =
impl_tpt.generics.type_param_defs.len() +
mth.generics.ty_params.len();
if num_type_params == 0 {
let llfn = get_item_val(ccx, mth.id);
let path = vec::append(
ty::item_path(ccx.tcx, impl_did),
[path_name(mth.ident)]);
let self_kind = match mth.explicit_self.node {
ast::sty_static => no_self,
_ => {
let self_ty = ty::node_id_to_type(ccx.tcx,
mth.self_id);
debug!("calling inline trans_fn with self_ty %s",
ty_to_str(ccx.tcx, self_ty));
match mth.explicit_self.node {
ast::sty_value => impl_self(self_ty, ty::ByRef),
_ => impl_self(self_ty, ty::ByCopy),
}
}
};
trans_fn(ccx,
path,
&mth.decl,
&mth.body,
llfn,
self_kind,
None,
mth.id,
[]);
}
local_def(mth.id)
}
};
}
|
{
let _icx = push_ctxt("maybe_instantiate_inline");
match ccx.external.find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline(%s): already inline as node id %d",
ty::item_path_str(ccx.tcx, fn_id), node_id);
return local_def(node_id);
}
Some(&None) => {
return fn_id; // Not inlinable
}
None => {
// Not seen yet
}
}
let csearch_result =
csearch::maybe_get_item_ast(
ccx.tcx, fn_id,
|
identifier_body
|
schleifen.rs
|
fn main()
|
    // for loop
for c in 1..6 {
println!("for: {}", c);
}
    // with enumerate() the iterations can be counted (starting at 0!)
    for (d,e) in (10..16).enumerate() {
        println!("for with enumerate: {}, iteration: {}", e, d);
}
}
|
{
    // Loops - loop, for, while
    // loop (without a break -> infinite loop)
    // break - exit the loop, continue - skip the current iteration
let mut a = 0;
loop {
a = a + 1;
println!("loop: {}", a);
if a == 5 { break }
}
    // while (as soon as the condition is met ...)
let mut b = 0;
while b != 5 {
b += 1;
println!("while: {}", b);
}
|
identifier_body
|
schleifen.rs
|
fn
|
() {
    // Loops - loop, for, while
    // loop (without a break -> infinite loop)
    // break - exit the loop, continue - skip the current iteration
let mut a = 0;
loop {
a = a + 1;
println!("loop: {}", a);
if a == 5 { break }
}
    // while (as soon as the condition is met ...)
    let mut b = 0;
    while b != 5 {
b += 1;
println!("while: {}", b);
}
    // for loop
for c in 1..6 {
println!("for: {}", c);
}
    // with enumerate() the iterations can be counted (starting at 0!)
    for (d,e) in (10..16).enumerate() {
        println!("for with enumerate: {}, iteration: {}", e, d);
}
}
|
main
|
identifier_name
|
schleifen.rs
|
fn main() {
    // Loops - loop, for, while
    // loop (without a break -> infinite loop)
    // break - exit the loop, continue - skip the current iteration
let mut a = 0;
loop {
a = a + 1;
println!("loop: {}", a);
if a == 5 { break }
}
    // while (as soon as the condition is met ...)
    let mut b = 0;
    while b != 5 {
b += 1;
println!("while: {}", b);
}
    // for loop
for c in 1..6 {
println!("for: {}", c);
}
    // with enumerate() the iterations can be counted (starting at 0!)
    for (d,e) in (10..16).enumerate() {
        println!("for with enumerate: {}, iteration: {}", e, d);
|
}
|
}
|
random_line_split
|
schleifen.rs
|
fn main() {
    // Loops - loop, for, while
    // loop (without a break -> infinite loop)
    // break - exit the loop, continue - skip the current iteration
let mut a = 0;
loop {
a = a + 1;
println!("loop: {}", a);
if a == 5
|
}
    // while (as soon as the condition is met ...)
    let mut b = 0;
    while b != 5 {
b += 1;
println!("while: {}", b);
}
    // for loop
for c in 1..6 {
println!("for: {}", c);
}
    // with enumerate() the iterations can be counted (starting at 0!)
    for (d,e) in (10..16).enumerate() {
        println!("for with enumerate: {}, iteration: {}", e, d);
}
}
|
{ break }
|
conditional_block
|
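A side note on the loop listings above: the comments mention `continue`, but none of the snippets actually use it. The following is a minimal, self-contained sketch (not part of the original file) showing `continue` next to `break`:

fn main() {
    // Skip even numbers with `continue`; stop the whole loop at 7 with `break`.
    for n in 1..10 {
        if n % 2 == 0 { continue; }
        if n == 7 { break; }
        println!("odd and below 7: {}", n);
    }
}

Running it prints 1, 3 and 5; the 7 triggers the `break` before being printed.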
resolver.rs
|
use std::sync::{Arc,Mutex};
use std::collections::HashMap;
use std::io::BufReader;
use rustls::{ResolvesServerCert, ClientHello};
use rustls::sign::{CertifiedKey, RSASigningKey};
use rustls::internal::pemfile;
use sozu_command::proxy::{CertificateAndKey, CertFingerprint, AddCertificate, RemoveCertificate};
use sozu_command::certificate::calculate_fingerprint_from_der;
use trie::TrieNode;
struct TlsData {
pub cert: CertifiedKey,
}
pub struct CertificateResolver {
pub domains: TrieNode<CertFingerprint>,
certificates: HashMap<CertFingerprint, TlsData>,
}
impl CertificateResolver {
pub fn new() -> CertificateResolver {
CertificateResolver {
domains: TrieNode::root(),
certificates: HashMap::new(),
}
}
pub fn add_certificate(&mut self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Some(certified_key) = generate_certified_key(add_certificate.certificate) {
let fingerprint = calculate_fingerprint_from_der(&certified_key.cert[0].0);
if add_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
error!("the rustls proxy cannot extract the names from the certificate (fingerprint={:?})", fingerprint);
return None;
}
let mut names = add_certificate.names;
//info!("cert fingerprint: {:?}", fingerprint);
let data = TlsData {
cert: certified_key,
};
let fingerprint = CertFingerprint(fingerprint);
self.certificates.insert(fingerprint.clone(), data);
for name in names.drain(..) {
self.domains.domain_insert(name.into_bytes(), fingerprint.clone());
}
Some(fingerprint)
} else {
None
}
}
pub fn
|
(&mut self, remove_certificate: RemoveCertificate) {
if let Some(_data) = self.certificates.get(&remove_certificate.fingerprint) {
//let cert = &data.cert.cert[0];
if remove_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
unimplemented!("the rustls proxy cannot extract the names from the certificate");
}
let names = remove_certificate.names;
for name in names {
self.domains.domain_remove(&name.into_bytes());
}
}
self.certificates.remove(&remove_certificate.fingerprint);
}
}
pub struct CertificateResolverWrapper(pub Mutex<CertificateResolver>);
impl CertificateResolverWrapper {
pub fn new() -> CertificateResolverWrapper {
CertificateResolverWrapper(Mutex::new(CertificateResolver::new()))
}
pub fn add_certificate(&self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.add_certificate(add_certificate)
} else {
None
}
}
pub fn remove_certificate(&self, remove_certificate: RemoveCertificate) {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.remove_certificate(remove_certificate)
}
}
}
impl ResolvesServerCert for CertificateResolverWrapper {
fn resolve(
&self,
client_hello: ClientHello
) -> Option<CertifiedKey> {
let server_name = client_hello.server_name();
let sigschemes = client_hello.sigschemes();
if server_name.is_none() {
error!("cannot look up certificate: no SNI from session");
return None;
}
let name: &str = server_name.unwrap().into();
trace!("trying to resolve name: {:?} for signature scheme: {:?}", name, sigschemes);
if let Ok(ref mut resolver) = self.0.try_lock() {
//resolver.domains.print();
if let Some(kv) = resolver.domains.domain_lookup(name.as_bytes(), true) {
trace!("looking for certificate for {:?} with fingerprint {:?}", name, kv.1);
return resolver.certificates.get(&kv.1).as_ref().map(|data| data.cert.clone());
}
}
error!("could not look up a certificate for server name '{}'", name);
None
}
}
pub fn generate_certified_key(certificate_and_key: CertificateAndKey) -> Option<CertifiedKey> {
let mut chain = Vec::new();
let mut cert_reader = BufReader::new(certificate_and_key.certificate.as_bytes());
let parsed_certs = pemfile::certs(&mut cert_reader);
if let Ok(certs) = parsed_certs {
for cert in certs {
chain.push(cert);
}
} else {
return None;
}
for ref cert in certificate_and_key.certificate_chain.iter() {
let mut chain_cert_reader = BufReader::new(cert.as_bytes());
if let Ok(parsed_chain_certs) = pemfile::certs(&mut chain_cert_reader) {
for cert in parsed_chain_certs {
chain.push(cert);
}
}
}
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::rsa_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
    if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
}
} else {
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::pkcs8_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
        if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
} else {
if let Ok(k) = rustls::sign::any_ecdsa_type(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(k));
return Some(certified);
} else {
error!("could not decode signing key (tried RSA and ECDSA)");
}
}
}
}
}
} else {
error!("could not parse private key: {:?}", parsed_key);
}
None
}
|
remove_certificate
|
identifier_name
|
resolver.rs
|
use std::sync::{Arc,Mutex};
use std::collections::HashMap;
use std::io::BufReader;
use rustls::{ResolvesServerCert, ClientHello};
use rustls::sign::{CertifiedKey, RSASigningKey};
use rustls::internal::pemfile;
use sozu_command::proxy::{CertificateAndKey, CertFingerprint, AddCertificate, RemoveCertificate};
use sozu_command::certificate::calculate_fingerprint_from_der;
use trie::TrieNode;
struct TlsData {
pub cert: CertifiedKey,
}
pub struct CertificateResolver {
pub domains: TrieNode<CertFingerprint>,
certificates: HashMap<CertFingerprint, TlsData>,
}
impl CertificateResolver {
pub fn new() -> CertificateResolver {
CertificateResolver {
domains: TrieNode::root(),
certificates: HashMap::new(),
}
}
pub fn add_certificate(&mut self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Some(certified_key) = generate_certified_key(add_certificate.certificate) {
let fingerprint = calculate_fingerprint_from_der(&certified_key.cert[0].0);
if add_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
error!("the rustls proxy cannot extract the names from the certificate (fingerprint={:?})", fingerprint);
return None;
}
let mut names = add_certificate.names;
//info!("cert fingerprint: {:?}", fingerprint);
let data = TlsData {
cert: certified_key,
};
let fingerprint = CertFingerprint(fingerprint);
self.certificates.insert(fingerprint.clone(), data);
for name in names.drain(..) {
self.domains.domain_insert(name.into_bytes(), fingerprint.clone());
}
Some(fingerprint)
} else {
None
}
}
pub fn remove_certificate(&mut self, remove_certificate: RemoveCertificate) {
if let Some(_data) = self.certificates.get(&remove_certificate.fingerprint) {
//let cert = &data.cert.cert[0];
if remove_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
unimplemented!("the rustls proxy cannot extract the names from the certificate");
}
let names = remove_certificate.names;
for name in names {
self.domains.domain_remove(&name.into_bytes());
}
}
self.certificates.remove(&remove_certificate.fingerprint);
}
}
pub struct CertificateResolverWrapper(pub Mutex<CertificateResolver>);
impl CertificateResolverWrapper {
pub fn new() -> CertificateResolverWrapper {
CertificateResolverWrapper(Mutex::new(CertificateResolver::new()))
}
pub fn add_certificate(&self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.add_certificate(add_certificate)
} else {
None
}
}
pub fn remove_certificate(&self, remove_certificate: RemoveCertificate) {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.remove_certificate(remove_certificate)
}
}
}
impl ResolvesServerCert for CertificateResolverWrapper {
fn resolve(
&self,
client_hello: ClientHello
) -> Option<CertifiedKey> {
let server_name = client_hello.server_name();
let sigschemes = client_hello.sigschemes();
if server_name.is_none() {
error!("cannot look up certificate: no SNI from session");
return None;
}
let name: &str = server_name.unwrap().into();
trace!("trying to resolve name: {:?} for signature scheme: {:?}", name, sigschemes);
if let Ok(ref mut resolver) = self.0.try_lock() {
//resolver.domains.print();
if let Some(kv) = resolver.domains.domain_lookup(name.as_bytes(), true)
|
}
error!("could not look up a certificate for server name '{}'", name);
None
}
}
pub fn generate_certified_key(certificate_and_key: CertificateAndKey) -> Option<CertifiedKey> {
let mut chain = Vec::new();
let mut cert_reader = BufReader::new(certificate_and_key.certificate.as_bytes());
let parsed_certs = pemfile::certs(&mut cert_reader);
if let Ok(certs) = parsed_certs {
for cert in certs {
chain.push(cert);
}
} else {
return None;
}
for ref cert in certificate_and_key.certificate_chain.iter() {
let mut chain_cert_reader = BufReader::new(cert.as_bytes());
if let Ok(parsed_chain_certs) = pemfile::certs(&mut chain_cert_reader) {
for cert in parsed_chain_certs {
chain.push(cert);
}
}
}
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::rsa_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
    if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
}
} else {
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::pkcs8_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
        if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
} else {
if let Ok(k) = rustls::sign::any_ecdsa_type(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(k));
return Some(certified);
} else {
error!("could not decode signing key (tried RSA and ECDSA)");
}
}
}
}
}
} else {
error!("could not parse private key: {:?}", parsed_key);
}
None
}
|
{
trace!("looking for certificate for {:?} with fingerprint {:?}", name, kv.1);
return resolver.certificates.get(&kv.1).as_ref().map(|data| data.cert.clone());
}
|
conditional_block
|
resolver.rs
|
use std::sync::{Arc,Mutex};
use std::collections::HashMap;
use std::io::BufReader;
use rustls::{ResolvesServerCert, ClientHello};
use rustls::sign::{CertifiedKey, RSASigningKey};
use rustls::internal::pemfile;
use sozu_command::proxy::{CertificateAndKey, CertFingerprint, AddCertificate, RemoveCertificate};
use sozu_command::certificate::calculate_fingerprint_from_der;
use trie::TrieNode;
struct TlsData {
pub cert: CertifiedKey,
}
pub struct CertificateResolver {
pub domains: TrieNode<CertFingerprint>,
certificates: HashMap<CertFingerprint, TlsData>,
}
impl CertificateResolver {
pub fn new() -> CertificateResolver {
CertificateResolver {
domains: TrieNode::root(),
certificates: HashMap::new(),
}
}
pub fn add_certificate(&mut self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Some(certified_key) = generate_certified_key(add_certificate.certificate) {
let fingerprint = calculate_fingerprint_from_der(&certified_key.cert[0].0);
if add_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
error!("the rustls proxy cannot extract the names from the certificate (fingerprint={:?})", fingerprint);
return None;
}
let mut names = add_certificate.names;
//info!("cert fingerprint: {:?}", fingerprint);
let data = TlsData {
|
self.certificates.insert(fingerprint.clone(), data);
for name in names.drain(..) {
self.domains.domain_insert(name.into_bytes(), fingerprint.clone());
}
Some(fingerprint)
} else {
None
}
}
pub fn remove_certificate(&mut self, remove_certificate: RemoveCertificate) {
if let Some(_data) = self.certificates.get(&remove_certificate.fingerprint) {
//let cert = &data.cert.cert[0];
if remove_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
unimplemented!("the rustls proxy cannot extract the names from the certificate");
}
let names = remove_certificate.names;
for name in names {
self.domains.domain_remove(&name.into_bytes());
}
}
self.certificates.remove(&remove_certificate.fingerprint);
}
}
pub struct CertificateResolverWrapper(pub Mutex<CertificateResolver>);
impl CertificateResolverWrapper {
pub fn new() -> CertificateResolverWrapper {
CertificateResolverWrapper(Mutex::new(CertificateResolver::new()))
}
pub fn add_certificate(&self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.add_certificate(add_certificate)
} else {
None
}
}
pub fn remove_certificate(&self, remove_certificate: RemoveCertificate) {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.remove_certificate(remove_certificate)
}
}
}
impl ResolvesServerCert for CertificateResolverWrapper {
fn resolve(
&self,
client_hello: ClientHello
) -> Option<CertifiedKey> {
let server_name = client_hello.server_name();
let sigschemes = client_hello.sigschemes();
if server_name.is_none() {
error!("cannot look up certificate: no SNI from session");
return None;
}
let name: &str = server_name.unwrap().into();
trace!("trying to resolve name: {:?} for signature scheme: {:?}", name, sigschemes);
if let Ok(ref mut resolver) = self.0.try_lock() {
//resolver.domains.print();
if let Some(kv) = resolver.domains.domain_lookup(name.as_bytes(), true) {
trace!("looking for certificate for {:?} with fingerprint {:?}", name, kv.1);
return resolver.certificates.get(&kv.1).as_ref().map(|data| data.cert.clone());
}
}
error!("could not look up a certificate for server name '{}'", name);
None
}
}
pub fn generate_certified_key(certificate_and_key: CertificateAndKey) -> Option<CertifiedKey> {
let mut chain = Vec::new();
let mut cert_reader = BufReader::new(certificate_and_key.certificate.as_bytes());
let parsed_certs = pemfile::certs(&mut cert_reader);
if let Ok(certs) = parsed_certs {
for cert in certs {
chain.push(cert);
}
} else {
return None;
}
for ref cert in certificate_and_key.certificate_chain.iter() {
let mut chain_cert_reader = BufReader::new(cert.as_bytes());
if let Ok(parsed_chain_certs) = pemfile::certs(&mut chain_cert_reader) {
for cert in parsed_chain_certs {
chain.push(cert);
}
}
}
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::rsa_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
    if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
}
} else {
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::pkcs8_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
        if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
} else {
if let Ok(k) = rustls::sign::any_ecdsa_type(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(k));
return Some(certified);
} else {
error!("could not decode signing key (tried RSA and ECDSA)");
}
}
}
}
}
} else {
error!("could not parse private key: {:?}", parsed_key);
}
None
}
|
cert: certified_key,
};
let fingerprint = CertFingerprint(fingerprint);
|
random_line_split
|
resolver.rs
|
use std::sync::{Arc,Mutex};
use std::collections::HashMap;
use std::io::BufReader;
use rustls::{ResolvesServerCert, ClientHello};
use rustls::sign::{CertifiedKey, RSASigningKey};
use rustls::internal::pemfile;
use sozu_command::proxy::{CertificateAndKey, CertFingerprint, AddCertificate, RemoveCertificate};
use sozu_command::certificate::calculate_fingerprint_from_der;
use trie::TrieNode;
struct TlsData {
pub cert: CertifiedKey,
}
pub struct CertificateResolver {
pub domains: TrieNode<CertFingerprint>,
certificates: HashMap<CertFingerprint, TlsData>,
}
impl CertificateResolver {
pub fn new() -> CertificateResolver {
CertificateResolver {
domains: TrieNode::root(),
certificates: HashMap::new(),
}
}
pub fn add_certificate(&mut self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Some(certified_key) = generate_certified_key(add_certificate.certificate) {
let fingerprint = calculate_fingerprint_from_der(&certified_key.cert[0].0);
if add_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
error!("the rustls proxy cannot extract the names from the certificate (fingerprint={:?})", fingerprint);
return None;
}
let mut names = add_certificate.names;
//info!("cert fingerprint: {:?}", fingerprint);
let data = TlsData {
cert: certified_key,
};
let fingerprint = CertFingerprint(fingerprint);
self.certificates.insert(fingerprint.clone(), data);
for name in names.drain(..) {
self.domains.domain_insert(name.into_bytes(), fingerprint.clone());
}
Some(fingerprint)
} else {
None
}
}
pub fn remove_certificate(&mut self, remove_certificate: RemoveCertificate)
|
}
self.certificates.remove(&remove_certificate.fingerprint);
}
}
pub struct CertificateResolverWrapper(pub Mutex<CertificateResolver>);
impl CertificateResolverWrapper {
pub fn new() -> CertificateResolverWrapper {
CertificateResolverWrapper(Mutex::new(CertificateResolver::new()))
}
pub fn add_certificate(&self, add_certificate: AddCertificate) -> Option<CertFingerprint> {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.add_certificate(add_certificate)
} else {
None
}
}
pub fn remove_certificate(&self, remove_certificate: RemoveCertificate) {
if let Ok(ref mut resolver) = self.0.try_lock() {
resolver.remove_certificate(remove_certificate)
}
}
}
impl ResolvesServerCert for CertificateResolverWrapper {
fn resolve(
&self,
client_hello: ClientHello
) -> Option<CertifiedKey> {
let server_name = client_hello.server_name();
let sigschemes = client_hello.sigschemes();
if server_name.is_none() {
error!("cannot look up certificate: no SNI from session");
return None;
}
let name: &str = server_name.unwrap().into();
trace!("trying to resolve name: {:?} for signature scheme: {:?}", name, sigschemes);
if let Ok(ref mut resolver) = self.0.try_lock() {
//resolver.domains.print();
if let Some(kv) = resolver.domains.domain_lookup(name.as_bytes(), true) {
trace!("looking for certificate for {:?} with fingerprint {:?}", name, kv.1);
return resolver.certificates.get(&kv.1).as_ref().map(|data| data.cert.clone());
}
}
error!("could not look up a certificate for server name '{}'", name);
None
}
}
pub fn generate_certified_key(certificate_and_key: CertificateAndKey) -> Option<CertifiedKey> {
let mut chain = Vec::new();
let mut cert_reader = BufReader::new(certificate_and_key.certificate.as_bytes());
let parsed_certs = pemfile::certs(&mut cert_reader);
if let Ok(certs) = parsed_certs {
for cert in certs {
chain.push(cert);
}
} else {
return None;
}
for ref cert in certificate_and_key.certificate_chain.iter() {
let mut chain_cert_reader = BufReader::new(cert.as_bytes());
if let Ok(parsed_chain_certs) = pemfile::certs(&mut chain_cert_reader) {
for cert in parsed_chain_certs {
chain.push(cert);
}
}
}
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::rsa_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
    if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
}
} else {
let mut key_reader = BufReader::new(certificate_and_key.key.as_bytes());
let parsed_key = pemfile::pkcs8_private_keys(&mut key_reader);
if let Ok(keys) = parsed_key {
        if !keys.is_empty() {
if let Ok(signing_key) = RSASigningKey::new(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(Box::new(signing_key)));
return Some(certified);
} else {
if let Ok(k) = rustls::sign::any_ecdsa_type(&keys[0]) {
let certified = CertifiedKey::new(chain, Arc::new(k));
return Some(certified);
} else {
error!("could not decode signing key (tried RSA and ECDSA)");
}
}
}
}
}
} else {
error!("could not parse private key: {:?}", parsed_key);
}
None
}
|
{
if let Some(_data) = self.certificates.get(&remove_certificate.fingerprint) {
//let cert = &data.cert.cert[0];
if remove_certificate.names.is_empty() {
//FIXME: waiting for https://github.com/briansmith/webpki/pull/65 to merge to get the DNS names
        // create an untrusted::Input
// let input = untrusted::Input::from(&certs[0].0);
// create an EndEntityCert
// let ee = webpki::EndEntityCert::from(input).unwrap()
// get names
// let dns_names = ee.list_dns_names()
        // names.extend(dns_names.drain(..).map(|name| name.to_string()));
unimplemented!("the rustls proxy cannot extract the names from the certificate");
}
let names = remove_certificate.names;
for name in names {
self.domains.domain_remove(&name.into_bytes());
}
|
identifier_body
|
footer.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{generated::css_classes::C, Msg};
use chrono::{offset::Local, Datelike};
use iml_wire_types::{Branding, Conf};
use seed::{prelude::*, *};
pub fn view(conf: &Conf) -> impl View<Msg> {
let year = Local::now().year();
let footer_string = match conf.branding {
Branding::Whamcloud => format!(
"Integrated Manager for Lustre software {} is Copyright © ",
conf.version
),
_ => {
if let Some(version) = &conf.exa_version {
format!("© 2020 - DDN EXAScaler v{} ", version)
|
"© 2020 - DDN EXAScaler ".to_string()
}
}
};
let footer_text = match conf.branding {
Branding::Whamcloud => div![
footer_string,
year.to_string(),
" DDN. All rights reserved.".to_string(),
],
_ => div![footer_string],
};
footer![
class![C.h_5, C.flex, C.justify_center],
div![class![C.px_5, C.text_sm, C.items_center,], footer_text]
]
}
|
} else {
|
random_line_split
|
footer.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{generated::css_classes::C, Msg};
use chrono::{offset::Local, Datelike};
use iml_wire_types::{Branding, Conf};
use seed::{prelude::*, *};
pub fn view(conf: &Conf) -> impl View<Msg>
|
year.to_string(),
" DDN. All rights reserved.".to_string(),
],
_ => div![footer_string],
};
footer![
class![C.h_5, C.flex, C.justify_center],
div![class![C.px_5, C.text_sm, C.items_center,], footer_text]
]
}
|
{
let year = Local::now().year();
let footer_string = match conf.branding {
Branding::Whamcloud => format!(
"Integrated Manager for Lustre software {} is Copyright © ",
conf.version
),
_ => {
if let Some(version) = &conf.exa_version {
format!("© 2020 - DDN EXAScaler v{} ", version)
} else {
"© 2020 - DDN EXAScaler ".to_string()
}
}
};
let footer_text = match conf.branding {
Branding::Whamcloud => div![
footer_string,
|
identifier_body
|
footer.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{generated::css_classes::C, Msg};
use chrono::{offset::Local, Datelike};
use iml_wire_types::{Branding, Conf};
use seed::{prelude::*, *};
pub fn
|
(conf: &Conf) -> impl View<Msg> {
let year = Local::now().year();
let footer_string = match conf.branding {
Branding::Whamcloud => format!(
"Integrated Manager for Lustre software {} is Copyright © ",
conf.version
),
_ => {
if let Some(version) = &conf.exa_version {
format!("© 2020 - DDN EXAScaler v{} ", version)
} else {
"© 2020 - DDN EXAScaler ".to_string()
}
}
};
let footer_text = match conf.branding {
Branding::Whamcloud => div![
footer_string,
year.to_string(),
" DDN. All rights reserved.".to_string(),
],
_ => div![footer_string],
};
footer![
class![C.h_5, C.flex, C.justify_center],
div![class![C.px_5, C.text_sm, C.items_center,], footer_text]
]
}
|
view
|
identifier_name
|
compositor_thread.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Communication with the compositor thread.
use SendableFrameTree;
use compositor::CompositingReason;
use euclid::{Point2D, Size2D};
use gfx_traits::Epoch;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{Key, KeyModifiers, KeyState, PipelineId, TopLevelBrowsingContextId};
use net_traits::image::base::Image;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{AnimationState, ConstellationMsg, EventResult, LoadData};
use servo_url::ServoUrl;
use std::fmt::{Debug, Error, Formatter};
use std::sync::mpsc::{Receiver, Sender};
use style_traits::cursor::Cursor;
use style_traits::viewport::ViewportConstraints;
use webrender;
use webrender_api;
/// Used to wake up the event loop, provided by the servo port/embedder.
pub trait EventLoopWaker : 'static + Send {
fn clone(&self) -> Box<EventLoopWaker + Send>;
fn wake(&self);
}
/// Sends messages to the embedder.
pub struct EmbedderProxy {
pub sender: Sender<EmbedderMsg>,
pub event_loop_waker: Box<EventLoopWaker>,
}
impl EmbedderProxy {
pub fn send(&self, msg: EmbedderMsg) {
// Send a message and kick the OS event loop awake.
if let Err(err) = self.sender.send(msg) {
warn!("Failed to send response ({}).", err);
}
self.event_loop_waker.wake();
}
}
impl Clone for EmbedderProxy {
fn clone(&self) -> EmbedderProxy {
EmbedderProxy {
sender: self.sender.clone(),
event_loop_waker: self.event_loop_waker.clone(),
}
}
}
/// The port that the embedder receives messages on.
pub struct EmbedderReceiver {
pub receiver: Receiver<EmbedderMsg>
}
impl EmbedderReceiver {
pub fn try_recv_embedder_msg(&mut self) -> Option<EmbedderMsg> {
self.receiver.try_recv().ok()
}
pub fn recv_embedder_msg(&mut self) -> EmbedderMsg {
self.receiver.recv().unwrap()
}
}
/// Sends messages to the compositor.
pub struct CompositorProxy {
pub sender: Sender<Msg>,
pub event_loop_waker: Box<EventLoopWaker>,
}
impl CompositorProxy {
pub fn send(&self, msg: Msg) {
// Send a message and kick the OS event loop awake.
if let Err(err) = self.sender.send(msg) {
warn!("Failed to send response ({}).", err);
}
self.event_loop_waker.wake();
}
}
impl Clone for CompositorProxy {
fn clone(&self) -> CompositorProxy {
CompositorProxy {
sender: self.sender.clone(),
event_loop_waker: self.event_loop_waker.clone(),
}
}
}
/// The port that the compositor receives messages on.
pub struct CompositorReceiver {
pub receiver: Receiver<Msg>
}
impl CompositorReceiver {
pub fn try_recv_compositor_msg(&mut self) -> Option<Msg> {
self.receiver.try_recv().ok()
}
pub fn recv_compositor_msg(&mut self) -> Msg {
self.receiver.recv().unwrap()
}
}
impl CompositorProxy {
pub fn recomposite(&self, reason: CompositingReason) {
self.send(Msg::Recomposite(reason));
}
}
pub enum EmbedderMsg {
/// A status message to be displayed by the browser chrome.
Status(TopLevelBrowsingContextId, Option<String>),
/// Alerts the embedder that the current page has changed its title.
ChangePageTitle(TopLevelBrowsingContextId, Option<String>),
/// Move the window to a point
MoveTo(TopLevelBrowsingContextId, Point2D<i32>),
/// Resize the window to size
ResizeTo(TopLevelBrowsingContextId, Size2D<u32>),
    /// Get window information: size and position
GetClientWindow(TopLevelBrowsingContextId, IpcSender<(Size2D<u32>, Point2D<i32>)>),
    /// Whether or not to follow a link
AllowNavigation(TopLevelBrowsingContextId, ServoUrl, IpcSender<bool>),
/// Sends an unconsumed key event back to the embedder.
KeyEvent(Option<TopLevelBrowsingContextId>, Option<char>, Key, KeyState, KeyModifiers),
/// Changes the cursor.
SetCursor(Cursor),
/// A favicon was detected
NewFavicon(TopLevelBrowsingContextId, ServoUrl),
/// <head> tag finished parsing
HeadParsed(TopLevelBrowsingContextId),
/// The history state has changed.
HistoryChanged(TopLevelBrowsingContextId, Vec<LoadData>, usize),
/// Enter or exit fullscreen
SetFullscreenState(TopLevelBrowsingContextId, bool),
/// The load of a page has begun
LoadStart(TopLevelBrowsingContextId),
/// The load of a page has completed
LoadComplete(TopLevelBrowsingContextId),
}
/// Messages from the painting thread and the constellation thread to the compositor thread.
pub enum Msg {
/// Requests that the compositor shut down.
Exit,
/// Informs the compositor that the constellation has completed shutdown.
/// Required because the constellation can have pending calls to make
/// (e.g. SetFrameTree) at the time that we send it an ExitMsg.
ShutdownComplete,
/// Alerts the compositor that the given pipeline has changed whether it is running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Replaces the current frame tree, typically called during main frame navigation.
SetFrameTree(SendableFrameTree),
/// Composite.
Recomposite(CompositingReason),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Composite to a PNG file and return the Image over a passed channel.
CreatePng(IpcSender<Option<Image>>),
/// Alerts the compositor that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
/// A reply to the compositor asking if the output image is stable.
IsReadyToSaveImageReply(bool),
/// Pipeline visibility changed
PipelineVisibilityChanged(PipelineId, bool),
/// WebRender has successfully processed a scroll. The boolean specifies whether a composite is
/// needed.
NewScrollFrameReady(bool),
/// A pipeline was shut down.
// This message acts as a synchronization point between the constellation,
// when it shuts down a pipeline, to the compositor; when the compositor
// sends a reply on the IpcSender, the constellation knows it's safe to
// tear down the other threads associated with this pipeline.
PipelineExited(PipelineId, IpcSender<()>),
/// Runs a closure in the compositor thread.
/// It's used to dispatch functions from webrender to the main thread's event loop.
/// Required to allow WGL GLContext sharing in Windows.
Dispatch(Box<Fn() + Send>),
/// Indicates to the compositor that it needs to record the time when the frame with
/// the given ID (epoch) is painted and report it to the layout thread of the given
/// pipeline ID.
PendingPaintMetric(PipelineId, Epoch),
/// The load of a page has completed
LoadComplete(TopLevelBrowsingContextId),
}
impl Debug for Msg {
fn
|
(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
Msg::Exit => write!(f, "Exit"),
Msg::ShutdownComplete => write!(f, "ShutdownComplete"),
Msg::ChangeRunningAnimationsState(..) => write!(f, "ChangeRunningAnimationsState"),
Msg::SetFrameTree(..) => write!(f, "SetFrameTree"),
Msg::Recomposite(..) => write!(f, "Recomposite"),
Msg::TouchEventProcessed(..) => write!(f, "TouchEventProcessed"),
Msg::CreatePng(..) => write!(f, "CreatePng"),
Msg::ViewportConstrained(..) => write!(f, "ViewportConstrained"),
Msg::IsReadyToSaveImageReply(..) => write!(f, "IsReadyToSaveImageReply"),
Msg::PipelineVisibilityChanged(..) => write!(f, "PipelineVisibilityChanged"),
Msg::PipelineExited(..) => write!(f, "PipelineExited"),
Msg::NewScrollFrameReady(..) => write!(f, "NewScrollFrameReady"),
Msg::Dispatch(..) => write!(f, "Dispatch"),
Msg::PendingPaintMetric(..) => write!(f, "PendingPaintMetric"),
Msg::LoadComplete(..) => write!(f, "LoadComplete"),
}
}
}
impl Debug for EmbedderMsg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
EmbedderMsg::Status(..) => write!(f, "Status"),
EmbedderMsg::ChangePageTitle(..) => write!(f, "ChangePageTitle"),
EmbedderMsg::MoveTo(..) => write!(f, "MoveTo"),
EmbedderMsg::ResizeTo(..) => write!(f, "ResizeTo"),
EmbedderMsg::GetClientWindow(..) => write!(f, "GetClientWindow"),
EmbedderMsg::AllowNavigation(..) => write!(f, "AllowNavigation"),
EmbedderMsg::KeyEvent(..) => write!(f, "KeyEvent"),
EmbedderMsg::SetCursor(..) => write!(f, "SetCursor"),
EmbedderMsg::NewFavicon(..) => write!(f, "NewFavicon"),
EmbedderMsg::HeadParsed(..) => write!(f, "HeadParsed"),
EmbedderMsg::HistoryChanged(..) => write!(f, "HistoryChanged"),
EmbedderMsg::SetFullscreenState(..) => write!(f, "SetFullscreenState"),
EmbedderMsg::LoadStart(..) => write!(f, "LoadStart"),
EmbedderMsg::LoadComplete(..) => write!(f, "LoadComplete"),
}
}
}
/// Data used to construct a compositor.
pub struct InitialCompositorState {
/// A channel to the compositor.
pub sender: CompositorProxy,
/// A port on which messages inbound to the compositor can be received.
pub receiver: CompositorReceiver,
/// A channel to the constellation.
pub constellation_chan: Sender<ConstellationMsg>,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// Instance of webrender API
pub webrender: webrender::Renderer,
pub webrender_document: webrender_api::DocumentId,
pub webrender_api: webrender_api::RenderApi,
}
|
fmt
|
identifier_name
|
compositor_thread.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Communication with the compositor thread.
use SendableFrameTree;
use compositor::CompositingReason;
use euclid::{Point2D, Size2D};
use gfx_traits::Epoch;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{Key, KeyModifiers, KeyState, PipelineId, TopLevelBrowsingContextId};
use net_traits::image::base::Image;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{AnimationState, ConstellationMsg, EventResult, LoadData};
use servo_url::ServoUrl;
use std::fmt::{Debug, Error, Formatter};
use std::sync::mpsc::{Receiver, Sender};
use style_traits::cursor::Cursor;
use style_traits::viewport::ViewportConstraints;
use webrender;
use webrender_api;
/// Used to wake up the event loop, provided by the servo port/embedder.
pub trait EventLoopWaker : 'static + Send {
fn clone(&self) -> Box<EventLoopWaker + Send>;
fn wake(&self);
}
/// Sends messages to the embedder.
pub struct EmbedderProxy {
pub sender: Sender<EmbedderMsg>,
pub event_loop_waker: Box<EventLoopWaker>,
}
impl EmbedderProxy {
pub fn send(&self, msg: EmbedderMsg) {
// Send a message and kick the OS event loop awake.
if let Err(err) = self.sender.send(msg) {
warn!("Failed to send response ({}).", err);
}
self.event_loop_waker.wake();
}
}
impl Clone for EmbedderProxy {
fn clone(&self) -> EmbedderProxy {
EmbedderProxy {
sender: self.sender.clone(),
event_loop_waker: self.event_loop_waker.clone(),
}
}
}
/// The port that the embedder receives messages on.
pub struct EmbedderReceiver {
pub receiver: Receiver<EmbedderMsg>
}
impl EmbedderReceiver {
pub fn try_recv_embedder_msg(&mut self) -> Option<EmbedderMsg> {
self.receiver.try_recv().ok()
}
pub fn recv_embedder_msg(&mut self) -> EmbedderMsg {
self.receiver.recv().unwrap()
|
}
}
/// Sends messages to the compositor.
pub struct CompositorProxy {
pub sender: Sender<Msg>,
pub event_loop_waker: Box<EventLoopWaker>,
}
impl CompositorProxy {
pub fn send(&self, msg: Msg) {
// Send a message and kick the OS event loop awake.
if let Err(err) = self.sender.send(msg) {
warn!("Failed to send response ({}).", err);
}
self.event_loop_waker.wake();
}
}
impl Clone for CompositorProxy {
fn clone(&self) -> CompositorProxy {
CompositorProxy {
sender: self.sender.clone(),
event_loop_waker: self.event_loop_waker.clone(),
}
}
}
/// The port that the compositor receives messages on.
pub struct CompositorReceiver {
pub receiver: Receiver<Msg>
}
impl CompositorReceiver {
pub fn try_recv_compositor_msg(&mut self) -> Option<Msg> {
self.receiver.try_recv().ok()
}
pub fn recv_compositor_msg(&mut self) -> Msg {
self.receiver.recv().unwrap()
}
}
impl CompositorProxy {
pub fn recomposite(&self, reason: CompositingReason) {
self.send(Msg::Recomposite(reason));
}
}
pub enum EmbedderMsg {
/// A status message to be displayed by the browser chrome.
Status(TopLevelBrowsingContextId, Option<String>),
/// Alerts the embedder that the current page has changed its title.
ChangePageTitle(TopLevelBrowsingContextId, Option<String>),
/// Move the window to a point
MoveTo(TopLevelBrowsingContextId, Point2D<i32>),
/// Resize the window to size
ResizeTo(TopLevelBrowsingContextId, Size2D<u32>),
    /// Get window information: size and position
GetClientWindow(TopLevelBrowsingContextId, IpcSender<(Size2D<u32>, Point2D<i32>)>),
    /// Whether or not to follow a link
AllowNavigation(TopLevelBrowsingContextId, ServoUrl, IpcSender<bool>),
/// Sends an unconsumed key event back to the embedder.
KeyEvent(Option<TopLevelBrowsingContextId>, Option<char>, Key, KeyState, KeyModifiers),
/// Changes the cursor.
SetCursor(Cursor),
/// A favicon was detected
NewFavicon(TopLevelBrowsingContextId, ServoUrl),
/// <head> tag finished parsing
HeadParsed(TopLevelBrowsingContextId),
/// The history state has changed.
HistoryChanged(TopLevelBrowsingContextId, Vec<LoadData>, usize),
/// Enter or exit fullscreen
SetFullscreenState(TopLevelBrowsingContextId, bool),
/// The load of a page has begun
LoadStart(TopLevelBrowsingContextId),
/// The load of a page has completed
LoadComplete(TopLevelBrowsingContextId),
}
/// Messages from the painting thread and the constellation thread to the compositor thread.
pub enum Msg {
/// Requests that the compositor shut down.
Exit,
/// Informs the compositor that the constellation has completed shutdown.
/// Required because the constellation can have pending calls to make
/// (e.g. SetFrameTree) at the time that we send it an ExitMsg.
ShutdownComplete,
/// Alerts the compositor that the given pipeline has changed whether it is running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Replaces the current frame tree, typically called during main frame navigation.
SetFrameTree(SendableFrameTree),
/// Composite.
Recomposite(CompositingReason),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Composite to a PNG file and return the Image over a passed channel.
CreatePng(IpcSender<Option<Image>>),
/// Alerts the compositor that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
/// A reply to the compositor asking if the output image is stable.
IsReadyToSaveImageReply(bool),
/// Pipeline visibility changed
PipelineVisibilityChanged(PipelineId, bool),
/// WebRender has successfully processed a scroll. The boolean specifies whether a composite is
/// needed.
NewScrollFrameReady(bool),
/// A pipeline was shut down.
// This message acts as a synchronization point between the constellation,
// when it shuts down a pipeline, to the compositor; when the compositor
// sends a reply on the IpcSender, the constellation knows it's safe to
// tear down the other threads associated with this pipeline.
PipelineExited(PipelineId, IpcSender<()>),
/// Runs a closure in the compositor thread.
/// It's used to dispatch functions from webrender to the main thread's event loop.
/// Required to allow WGL GLContext sharing in Windows.
Dispatch(Box<Fn() + Send>),
/// Indicates to the compositor that it needs to record the time when the frame with
/// the given ID (epoch) is painted and report it to the layout thread of the given
/// pipeline ID.
PendingPaintMetric(PipelineId, Epoch),
/// The load of a page has completed
LoadComplete(TopLevelBrowsingContextId),
}
impl Debug for Msg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
Msg::Exit => write!(f, "Exit"),
Msg::ShutdownComplete => write!(f, "ShutdownComplete"),
Msg::ChangeRunningAnimationsState(..) => write!(f, "ChangeRunningAnimationsState"),
Msg::SetFrameTree(..) => write!(f, "SetFrameTree"),
Msg::Recomposite(..) => write!(f, "Recomposite"),
Msg::TouchEventProcessed(..) => write!(f, "TouchEventProcessed"),
Msg::CreatePng(..) => write!(f, "CreatePng"),
Msg::ViewportConstrained(..) => write!(f, "ViewportConstrained"),
Msg::IsReadyToSaveImageReply(..) => write!(f, "IsReadyToSaveImageReply"),
Msg::PipelineVisibilityChanged(..) => write!(f, "PipelineVisibilityChanged"),
Msg::PipelineExited(..) => write!(f, "PipelineExited"),
Msg::NewScrollFrameReady(..) => write!(f, "NewScrollFrameReady"),
Msg::Dispatch(..) => write!(f, "Dispatch"),
Msg::PendingPaintMetric(..) => write!(f, "PendingPaintMetric"),
Msg::LoadComplete(..) => write!(f, "LoadComplete"),
}
}
}
impl Debug for EmbedderMsg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
EmbedderMsg::Status(..) => write!(f, "Status"),
EmbedderMsg::ChangePageTitle(..) => write!(f, "ChangePageTitle"),
EmbedderMsg::MoveTo(..) => write!(f, "MoveTo"),
EmbedderMsg::ResizeTo(..) => write!(f, "ResizeTo"),
EmbedderMsg::GetClientWindow(..) => write!(f, "GetClientWindow"),
EmbedderMsg::AllowNavigation(..) => write!(f, "AllowNavigation"),
EmbedderMsg::KeyEvent(..) => write!(f, "KeyEvent"),
EmbedderMsg::SetCursor(..) => write!(f, "SetCursor"),
EmbedderMsg::NewFavicon(..) => write!(f, "NewFavicon"),
EmbedderMsg::HeadParsed(..) => write!(f, "HeadParsed"),
EmbedderMsg::HistoryChanged(..) => write!(f, "HistoryChanged"),
EmbedderMsg::SetFullscreenState(..) => write!(f, "SetFullscreenState"),
EmbedderMsg::LoadStart(..) => write!(f, "LoadStart"),
EmbedderMsg::LoadComplete(..) => write!(f, "LoadComplete"),
}
}
}
/// Data used to construct a compositor.
pub struct InitialCompositorState {
/// A channel to the compositor.
pub sender: CompositorProxy,
/// A port on which messages inbound to the compositor can be received.
pub receiver: CompositorReceiver,
/// A channel to the constellation.
pub constellation_chan: Sender<ConstellationMsg>,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// Instance of webrender API
pub webrender: webrender::Renderer,
pub webrender_document: webrender_api::DocumentId,
pub webrender_api: webrender_api::RenderApi,
}
|
random_line_split
|
|
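For the compositor records above, `EventLoopWaker` is the one hook the embedder must supply. A hedged sketch of a possible implementation, assuming only the trait definition shown in the listing; the `ChannelWaker` name and the channel-based design are illustrative, not Servo's actual embedder code:

use std::sync::mpsc::Sender;

// Hypothetical waker that nudges a host event loop through a channel.
pub struct ChannelWaker {
    wakeup: Sender<()>,
}

impl EventLoopWaker for ChannelWaker {
    fn clone(&self) -> Box<EventLoopWaker + Send> {
        Box::new(ChannelWaker { wakeup: self.wakeup.clone() })
    }
    fn wake(&self) {
        // Ignore send errors: if the receiver is gone, the loop has already shut down.
        let _ = self.wakeup.send(());
    }
}

The `clone` method returns a boxed trait object because `CompositorProxy` and `EmbedderProxy` each hold a `Box<EventLoopWaker>` and need their own handle.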
minmax-stability-issue-23687.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(iter_min_max, cmp_partial, iter_cmp)]
use std::fmt::Debug;
use std::cmp::{self, PartialOrd, Ordering};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Foo {
n: u8,
name: &'static str
}
impl PartialOrd for Foo {
fn partial_cmp(&self, other: &Foo) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Foo {
fn cmp(&self, other: &Foo) -> Ordering {
self.n.cmp(&other.n)
}
}
fn main() {
let a = Foo { n: 4, name: "a" };
let b = Foo { n: 4, name: "b" };
let c = Foo { n: 8, name: "c" };
let d = Foo { n: 8, name: "d" };
let e = Foo { n: 22, name: "e" };
let f = Foo { n: 22, name: "f" };
let data = [a, b, c, d, e, f];
// `min` should return the left when the values are equal
assert_eq!(data.iter().min(), Some(&a));
assert_eq!(data.iter().min_by(|a| a.n), Some(&a));
assert_eq!(cmp::min(a, b), a);
assert_eq!(cmp::min(b, a), b);
// `max` should return the right when the values are equal
assert_eq!(data.iter().max(), Some(&f));
assert_eq!(data.iter().max_by(|a| a.n), Some(&f));
assert_eq!(cmp::max(e, f), f);
assert_eq!(cmp::max(f, e), e);
let mut presorted = data.to_vec();
presorted.sort();
assert_stable(&presorted);
let mut presorted = data.to_vec();
presorted.sort_by(|a, b| a.cmp(b));
assert_stable(&presorted);
// Assert that sorted and min/max are the same
fn assert_stable<T: Ord + Debug>(presorted: &[T]) {
for slice in presorted.windows(2) {
let a = &slice[0];
let b = &slice[1];
assert_eq!(a, cmp::min(a, b));
assert_eq!(b, cmp::max(a, b));
}
}
}
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
random_line_split
|
minmax-stability-issue-23687.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(iter_min_max, cmp_partial, iter_cmp)]
use std::fmt::Debug;
use std::cmp::{self, PartialOrd, Ordering};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Foo {
n: u8,
name: &'static str
}
impl PartialOrd for Foo {
fn partial_cmp(&self, other: &Foo) -> Option<Ordering>
|
}
impl Ord for Foo {
fn cmp(&self, other: &Foo) -> Ordering {
self.n.cmp(&other.n)
}
}
fn main() {
let a = Foo { n: 4, name: "a" };
let b = Foo { n: 4, name: "b" };
let c = Foo { n: 8, name: "c" };
let d = Foo { n: 8, name: "d" };
let e = Foo { n: 22, name: "e" };
let f = Foo { n: 22, name: "f" };
let data = [a, b, c, d, e, f];
// `min` should return the left when the values are equal
assert_eq!(data.iter().min(), Some(&a));
assert_eq!(data.iter().min_by(|a| a.n), Some(&a));
assert_eq!(cmp::min(a, b), a);
assert_eq!(cmp::min(b, a), b);
// `max` should return the right when the values are equal
assert_eq!(data.iter().max(), Some(&f));
assert_eq!(data.iter().max_by(|a| a.n), Some(&f));
assert_eq!(cmp::max(e, f), f);
assert_eq!(cmp::max(f, e), e);
let mut presorted = data.to_vec();
presorted.sort();
assert_stable(&presorted);
let mut presorted = data.to_vec();
presorted.sort_by(|a, b| a.cmp(b));
assert_stable(&presorted);
// Assert that sorted and min/max are the same
fn assert_stable<T: Ord + Debug>(presorted: &[T]) {
for slice in presorted.windows(2) {
let a = &slice[0];
let b = &slice[1];
assert_eq!(a, cmp::min(a, b));
assert_eq!(b, cmp::max(a, b));
}
}
}
|
{
Some(self.cmp(other))
}
|
identifier_body
|
minmax-stability-issue-23687.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(iter_min_max, cmp_partial, iter_cmp)]
use std::fmt::Debug;
use std::cmp::{self, PartialOrd, Ordering};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct Foo {
n: u8,
name: &'static str
}
impl PartialOrd for Foo {
fn partial_cmp(&self, other: &Foo) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Foo {
fn cmp(&self, other: &Foo) -> Ordering {
self.n.cmp(&other.n)
}
}
fn
|
() {
let a = Foo { n: 4, name: "a" };
let b = Foo { n: 4, name: "b" };
let c = Foo { n: 8, name: "c" };
let d = Foo { n: 8, name: "d" };
let e = Foo { n: 22, name: "e" };
let f = Foo { n: 22, name: "f" };
let data = [a, b, c, d, e, f];
// `min` should return the left when the values are equal
assert_eq!(data.iter().min(), Some(&a));
assert_eq!(data.iter().min_by(|a| a.n), Some(&a));
assert_eq!(cmp::min(a, b), a);
assert_eq!(cmp::min(b, a), b);
// `max` should return the right when the values are equal
assert_eq!(data.iter().max(), Some(&f));
assert_eq!(data.iter().max_by(|a| a.n), Some(&f));
assert_eq!(cmp::max(e, f), f);
assert_eq!(cmp::max(f, e), e);
let mut presorted = data.to_vec();
presorted.sort();
assert_stable(&presorted);
let mut presorted = data.to_vec();
presorted.sort_by(|a, b| a.cmp(b));
assert_stable(&presorted);
// Assert that sorted and min/max are the same
fn assert_stable<T: Ord + Debug>(presorted: &[T]) {
for slice in presorted.windows(2) {
let a = &slice[0];
let b = &slice[1];
assert_eq!(a, cmp::min(a, b));
assert_eq!(b, cmp::max(a, b));
}
}
}
|
main
|
identifier_name
|
modadd.rs
|
use num::{Zero, BigUint, BigInt, Integer, PrimInt};
/// The `ModAdd` trait defines an interface for calculating the sum of two integers
/// modulo a modulus.
pub trait ModAdd {
/// The function `mod_add` computes the sum
/// ```text
/// x + y (mod m)
/// ```
/// where x, y, and m are integers, and m is the modulus.
///
/// # Panics
/// when the modulus is zero.
fn mod_add(self: &Self, other: &Self, modulus: &Self) -> Self;
}
#[inline]
fn __mod_add<T: PrimInt>(x: &T, y: &T, modulus: &T) -> T {
    assert!(*modulus != <T as Zero>::zero());
((*x) + (*y)) % *modulus
}
impl ModAdd for BigInt {
fn mod_add(self: &BigInt, other: &BigInt, modulus: &BigInt) -> BigInt {
(self + other).mod_floor(modulus)
}
}
impl ModAdd for BigUint {
fn mod_add(self: &BigUint, other: &BigUint, modulus: &BigUint) -> BigUint {
(self + other).mod_floor(modulus)
}
}
// Macro for generating ModAdd implementations.
macro_rules! mod_add {
    ($T:ty) => {
impl ModAdd for $T {
fn mod_add(self: &$T, other: &$T, modulus: &$T) -> $T {
__mod_add(self, other, modulus)
}
}
}
}
// Implementations of ModAdd trait.
mod_add!(u8);
mod_add!(u16);
mod_add!(u32);
mod_add!(u64);
mod_add!(usize);
mod_add!(i8);
mod_add!(i16);
mod_add!(i32);
mod_add!(i64);
mod_add!(isize);
#[cfg(test)]
mod tests {
use num::BigInt;
use super::ModAdd;
struct TestCase {
x: BigInt,
y: BigInt,
modulus: BigInt,
sum: BigInt,
}
struct Test {
data: Vec<TestCase>,
}
fn mod_add_test_cases() -> Test {
Test {
            data: vec![
TestCase {
x: BigInt::from(464283712),
y: BigInt::from(559073817),
modulus: BigInt::from(635717262),
sum: BigInt::from(387640267),
},
TestCase {
x: BigInt::from(-812470905),
y: BigInt::from(228473033),
modulus: BigInt::from(538744916),
sum: BigInt::from(493491960),
},
TestCase {
x: BigInt::from(227791838),
y: BigInt::from(233974561),
modulus: BigInt::from(681539081),
sum: BigInt::from(461766399),
},
TestCase {
x: BigInt::from(735172765),
y: BigInt::from(446253906),
modulus: BigInt::from(53235608),
sum: BigInt::from(10243295),
}
]
}
}
fn run_tests(tests: &Test) {
for test_case in tests.data.iter() {
let result = test_case.x.mod_add(&test_case.y, &test_case.modulus);
assert_eq!(result, test_case.sum);
}
}
#[test]
fn test_mod_add() {
run_tests(&mod_add_test_cases());
}
#[test]
#[should_panic]
fn test_mod_add_zero_mod()
|
}
|
{
let x = BigInt::from(735172765);
let y = BigInt::from(446253906);
let modulus = BigInt::from(0);
x.mod_add(&y, &modulus);
}
|
identifier_body
|
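// Editor's sketch (an assumed example, not part of the original modadd.rs
// record): the ModAdd doc above defines mod_add as (x + y) mod m with a panic
// on m == 0. The record's __mod_add adds the operands directly, so this
// standalone version widens the sum to u128 first to rule out overflow for
// u64 inputs.
fn mod_add_u64_sketch(x: u64, y: u64, modulus: u64) -> u64 {
    assert!(modulus != 0, "modulus must be non-zero");
    ((x as u128 + y as u128) % modulus as u128) as u64
}

// Matches the last test case in the record above:
// (735172765 + 446253906) mod 53235608 == 10243295.
fn mod_add_u64_sketch_check() {
    assert_eq!(mod_add_u64_sketch(735_172_765, 446_253_906, 53_235_608), 10_243_295);
}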
modadd.rs
|
use num::{Zero, BigUint, BigInt, Integer, PrimInt};
/// The `ModAdd` trait defines an interface for calculating the sum of two integers
/// modulo a modulus.
pub trait ModAdd {
/// The function `mod_add` computes the sum
/// ```text
/// x + y (mod m)
/// ```
/// where x, y, and m are integers, and m is the modulus.
///
/// # Panics
/// when the modulus is zero.
fn mod_add(self: &Self, other: &Self, modulus: &Self) -> Self;
}
#[inline]
fn __mod_add<T: PrimInt>(x: &T, y: &T, modulus: &T) -> T {
    assert!(*modulus != <T as Zero>::zero());
((*x) + (*y)) % *modulus
}
impl ModAdd for BigInt {
fn mod_add(self: &BigInt, other: &BigInt, modulus: &BigInt) -> BigInt {
(self + other).mod_floor(modulus)
}
}
impl ModAdd for BigUint {
fn mod_add(self: &BigUint, other: &BigUint, modulus: &BigUint) -> BigUint {
(self + other).mod_floor(modulus)
}
}
// Macro for generating ModAdd implementations.
macro_rules! mod_add {
    ($T:ty) => {
impl ModAdd for $T {
fn mod_add(self: &$T, other: &$T, modulus: &$T) -> $T {
__mod_add(self, other, modulus)
}
}
}
}
// Implementations of ModAdd trait.
mod_add!(u8);
mod_add!(u16);
mod_add!(u32);
mod_add!(u64);
mod_add!(usize);
mod_add!(i8);
mod_add!(i16);
mod_add!(i32);
mod_add!(i64);
mod_add!(isize);
#[cfg(test)]
mod tests {
use num::BigInt;
use super::ModAdd;
struct TestCase {
x: BigInt,
y: BigInt,
modulus: BigInt,
sum: BigInt,
}
struct Test {
data: Vec<TestCase>,
}
fn mod_add_test_cases() -> Test {
Test {
|
TestCase {
x: BigInt::from(464283712),
y: BigInt::from(559073817),
modulus: BigInt::from(635717262),
sum: BigInt::from(387640267),
},
TestCase {
x: BigInt::from(-812470905),
y: BigInt::from(228473033),
modulus: BigInt::from(538744916),
sum: BigInt::from(493491960),
},
TestCase {
x: BigInt::from(227791838),
y: BigInt::from(233974561),
modulus: BigInt::from(681539081),
sum: BigInt::from(461766399),
},
TestCase {
x: BigInt::from(735172765),
y: BigInt::from(446253906),
modulus: BigInt::from(53235608),
sum: BigInt::from(10243295),
}
]
}
}
fn run_tests(tests: &Test) {
for test_case in tests.data.iter() {
let result = test_case.x.mod_add(&test_case.y, &test_case.modulus);
assert_eq!(result, test_case.sum);
}
}
#[test]
fn test_mod_add() {
run_tests(&mod_add_test_cases());
}
#[test]
#[should_panic]
fn test_mod_add_zero_mod() {
let x = BigInt::from(735172765);
let y = BigInt::from(446253906);
let modulus = BigInt::from(0);
x.mod_add(&y, &modulus);
}
}
|
            data: vec![
|
random_line_split
|
modadd.rs
|
use num::{Zero, BigUint, BigInt, Integer, PrimInt};
/// The `ModAdd` trait defines an interface for calculating the sum of two integers
/// modulo a modulus.
pub trait ModAdd {
/// The function `mod_add` computes the sum
/// ```text
/// x + y (mod m)
/// ```
/// where x, y, and m are integers, and m is the modulus.
///
/// # Panics
/// when the modulus is zero.
fn mod_add(self: &Self, other: &Self, modulus: &Self) -> Self;
}
#[inline]
fn __mod_add<T: PrimInt>(x: &T, y: &T, modulus: &T) -> T {
    assert!(*modulus != <T as Zero>::zero());
((*x) + (*y)) % *modulus
}
impl ModAdd for BigInt {
fn mod_add(self: &BigInt, other: &BigInt, modulus: &BigInt) -> BigInt {
(self + other).mod_floor(modulus)
}
}
impl ModAdd for BigUint {
fn mod_add(self: &BigUint, other: &BigUint, modulus: &BigUint) -> BigUint {
(self + other).mod_floor(modulus)
}
}
// Macro for generating ModAdd implementations.
macro_rules! mod_add {
    ($T:ty) => {
impl ModAdd for $T {
fn mod_add(self: &$T, other: &$T, modulus: &$T) -> $T {
__mod_add(self, other, modulus)
}
}
}
}
// Implementations of ModAdd trait.
mod_add!(u8);
mod_add!(u16);
mod_add!(u32);
mod_add!(u64);
mod_add!(usize);
mod_add!(i8);
mod_add!(i16);
mod_add!(i32);
mod_add!(i64);
mod_add!(isize);
#[cfg(test)]
mod tests {
use num::BigInt;
use super::ModAdd;
struct TestCase {
x: BigInt,
y: BigInt,
modulus: BigInt,
sum: BigInt,
}
struct
|
{
data: Vec<TestCase>,
}
fn mod_add_test_cases() -> Test {
Test {
            data: vec![
TestCase {
x: BigInt::from(464283712),
y: BigInt::from(559073817),
modulus: BigInt::from(635717262),
sum: BigInt::from(387640267),
},
TestCase {
x: BigInt::from(-812470905),
y: BigInt::from(228473033),
modulus: BigInt::from(538744916),
sum: BigInt::from(493491960),
},
TestCase {
x: BigInt::from(227791838),
y: BigInt::from(233974561),
modulus: BigInt::from(681539081),
sum: BigInt::from(461766399),
},
TestCase {
x: BigInt::from(735172765),
y: BigInt::from(446253906),
modulus: BigInt::from(53235608),
sum: BigInt::from(10243295),
}
]
}
}
fn run_tests(tests: &Test) {
for test_case in tests.data.iter() {
let result = test_case.x.mod_add(&test_case.y, &test_case.modulus);
assert_eq!(result, test_case.sum);
}
}
#[test]
fn test_mod_add() {
run_tests(&mod_add_test_cases());
}
#[test]
#[should_panic]
fn test_mod_add_zero_mod() {
let x = BigInt::from(735172765);
let y = BigInt::from(446253906);
let modulus = BigInt::from(0);
x.mod_add(&y, &modulus);
}
}
|
Test
|
identifier_name
|
range.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generating numbers between two others.
// this is surprisingly complicated to be both generic & correct
use core::prelude::{PartialOrd};
use core::num::Int;
use Rng;
use distributions::{Sample, IndependentSample};
/// Sample values uniformly between two bounds.
///
/// This gives a uniform distribution (assuming the RNG used to sample
/// it is itself uniform & the `SampleRange` implementation for the
/// given type is correct), even for edge cases like `low = 0u8`,
/// `high = 170u8`, for which a naive modulo operation would return
/// numbers less than 85 with double the probability to those greater
/// than 85.
///
/// Types should attempt to sample in `[low, high)`, i.e., not
/// including `high`, but this may be very difficult. All the
/// primitive integer types satisfy this property, and the float types
/// normally satisfy it, but rounding may mean `high` can occur.
///
/// # Example
///
/// ```rust
/// use std::rand::distributions::{IndependentSample, Range};
///
/// fn main() {
/// let between = Range::new(10u, 10000u);
/// let mut rng = std::rand::thread_rng();
/// let mut sum = 0;
/// for _ in range(0u, 1000) {
/// sum += between.ind_sample(&mut rng);
/// }
/// println!("{}", sum);
/// }
/// ```
pub struct Range<X> {
low: X,
range: X,
accept_zone: X
}
impl<X: SampleRange + PartialOrd> Range<X> {
/// Create a new `Range` instance that samples uniformly from
|
pub fn new(low: X, high: X) -> Range<X> {
assert!(low < high, "Range::new called with `low >= high`");
SampleRange::construct_range(low, high)
}
}
impl<Sup: SampleRange> Sample<Sup> for Range<Sup> {
#[inline]
fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
}
impl<Sup: SampleRange> IndependentSample<Sup> for Range<Sup> {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
SampleRange::sample_range(self, rng)
}
}
/// The helper trait for types that have a sensible way to sample
/// uniformly between two values. This should not be used directly,
/// and is only to facilitate `Range`.
pub trait SampleRange {
/// Construct the `Range` object that `sample_range`
/// requires. This should not ever be called directly, only via
/// `Range::new`, which will check that `low < high`, so this
/// function doesn't have to repeat the check.
fn construct_range(low: Self, high: Self) -> Range<Self>;
/// Sample a value from the given `Range` with the given `Rng` as
/// a source of randomness.
fn sample_range<R: Rng>(r: &Range<Self>, rng: &mut R) -> Self;
}
macro_rules! integer_impl {
($ty:ty, $unsigned:ty) => {
impl SampleRange for $ty {
// we play free and fast with unsigned vs signed here
// (when $ty is signed), but that's fine, since the
// contract of this macro is for $ty and $unsigned to be
// "bit-equal", so casting between them is a no-op & a
// bijection.
fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
let range = high as $unsigned - low as $unsigned;
let unsigned_max: $unsigned = Int::max_value();
// this is the largest number that fits into $unsigned
// that `range` divides evenly, so, if we've sampled
// `n` uniformly from this region, then `n % range` is
// uniform in [0, range)
let zone = unsigned_max - unsigned_max % range;
Range {
low: low,
range: range as $ty,
accept_zone: zone as $ty
}
}
#[inline]
fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
loop {
// rejection sample
let v = rng.gen::<$unsigned>();
// until we find something that fits into the
// region which r.range evenly divides (this will
// be uniformly distributed)
if v < r.accept_zone as $unsigned {
// and return it, with some adjustments
return r.low + (v % r.range as $unsigned) as $ty;
}
}
}
}
}
}
integer_impl! { i8, u8 }
integer_impl! { i16, u16 }
integer_impl! { i32, u32 }
integer_impl! { i64, u64 }
integer_impl! { int, uint }
integer_impl! { u8, u8 }
integer_impl! { u16, u16 }
integer_impl! { u32, u32 }
integer_impl! { u64, u64 }
integer_impl! { uint, uint }
macro_rules! float_impl {
($ty:ty) => {
impl SampleRange for $ty {
fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
Range {
low: low,
range: high - low,
accept_zone: 0.0 // unused
}
}
fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
r.low + r.range * rng.gen()
}
}
}
}
float_impl! { f32 }
float_impl! { f64 }
#[cfg(test)]
mod tests {
use std::num::Int;
use std::prelude::v1::*;
use distributions::{Sample, IndependentSample};
use super::Range as Range;
#[should_fail]
#[test]
fn test_range_bad_limits_equal() {
Range::new(10i, 10i);
}
#[should_fail]
#[test]
fn test_range_bad_limits_flipped() {
Range::new(10i, 5i);
}
#[test]
fn test_integers() {
let mut rng = ::test::rng();
macro_rules! t {
($($ty:ty),*) => {{
$(
let v: &[($ty, $ty)] = &[(0, 10),
(10, 127),
(Int::min_value(), Int::max_value())];
for &(low, high) in v.iter() {
let mut sampler: Range<$ty> = Range::new(low, high);
for _ in range(0u, 1000) {
let v = sampler.sample(&mut rng);
assert!(low <= v && v < high);
let v = sampler.ind_sample(&mut rng);
assert!(low <= v && v < high);
}
}
)*
}}
}
t!(i8, i16, i32, i64, int,
u8, u16, u32, u64, uint)
}
#[test]
fn test_floats() {
let mut rng = ::test::rng();
macro_rules! t {
($($ty:ty),*) => {{
$(
let v: &[($ty, $ty)] = &[(0.0, 100.0),
(-1e35, -1e25),
(1e-35, 1e-25),
(-1e35, 1e35)];
for &(low, high) in v.iter() {
let mut sampler: Range<$ty> = Range::new(low, high);
for _ in range(0u, 1000) {
let v = sampler.sample(&mut rng);
assert!(low <= v && v < high);
let v = sampler.ind_sample(&mut rng);
assert!(low <= v && v < high);
}
}
)*
}}
}
t!(f32, f64)
}
}
|
/// `[low, high)`. Panics if `low >= high`.
|
random_line_split
|
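// Editor's sketch (an assumed example, not part of the original range.rs
// record): the integer_impl! macro above avoids modulo bias by rejection
// sampling. This standalone u32 version mirrors that logic, taking the raw
// generator as a closure instead of the record's pre-1.0 Rng trait.
fn sample_u32_in_range<F: FnMut() -> u32>(low: u32, high: u32, mut gen: F) -> u32 {
    assert!(low < high, "called with `low >= high`");
    let range = high - low;
    // Largest zone evenly divisible by `range`; raw samples inside it stay
    // uniform after the final `% range` reduction, anything above is rejected.
    let zone = u32::MAX - u32::MAX % range;
    loop {
        let v = gen();
        if v < zone {
            return low + v % range;
        }
        // rejected: draw again
    }
}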
range.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generating numbers between two others.
// this is surprisingly complicated to be both generic & correct
use core::prelude::{PartialOrd};
use core::num::Int;
use Rng;
use distributions::{Sample, IndependentSample};
/// Sample values uniformly between two bounds.
///
/// This gives a uniform distribution (assuming the RNG used to sample
/// it is itself uniform & the `SampleRange` implementation for the
/// given type is correct), even for edge cases like `low = 0u8`,
/// `high = 170u8`, for which a naive modulo operation would return
/// numbers less than 85 with double the probability to those greater
/// than 85.
///
/// Types should attempt to sample in `[low, high)`, i.e., not
/// including `high`, but this may be very difficult. All the
/// primitive integer types satisfy this property, and the float types
/// normally satisfy it, but rounding may mean `high` can occur.
///
/// # Example
///
/// ```rust
/// use std::rand::distributions::{IndependentSample, Range};
///
/// fn main() {
/// let between = Range::new(10u, 10000u);
/// let mut rng = std::rand::thread_rng();
/// let mut sum = 0;
/// for _ in range(0u, 1000) {
/// sum += between.ind_sample(&mut rng);
/// }
/// println!("{}", sum);
/// }
/// ```
pub struct Range<X> {
low: X,
range: X,
accept_zone: X
}
impl<X: SampleRange + PartialOrd> Range<X> {
/// Create a new `Range` instance that samples uniformly from
/// `[low, high)`. Panics if `low >= high`.
pub fn
|
(low: X, high: X) -> Range<X> {
assert!(low < high, "Range::new called with `low >= high`");
SampleRange::construct_range(low, high)
}
}
impl<Sup: SampleRange> Sample<Sup> for Range<Sup> {
#[inline]
fn sample<R: Rng>(&mut self, rng: &mut R) -> Sup { self.ind_sample(rng) }
}
impl<Sup: SampleRange> IndependentSample<Sup> for Range<Sup> {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> Sup {
SampleRange::sample_range(self, rng)
}
}
/// The helper trait for types that have a sensible way to sample
/// uniformly between two values. This should not be used directly,
/// and is only to facilitate `Range`.
pub trait SampleRange {
/// Construct the `Range` object that `sample_range`
/// requires. This should not ever be called directly, only via
/// `Range::new`, which will check that `low < high`, so this
/// function doesn't have to repeat the check.
fn construct_range(low: Self, high: Self) -> Range<Self>;
/// Sample a value from the given `Range` with the given `Rng` as
/// a source of randomness.
fn sample_range<R: Rng>(r: &Range<Self>, rng: &mut R) -> Self;
}
macro_rules! integer_impl {
($ty:ty, $unsigned:ty) => {
impl SampleRange for $ty {
// we play free and fast with unsigned vs signed here
// (when $ty is signed), but that's fine, since the
// contract of this macro is for $ty and $unsigned to be
// "bit-equal", so casting between them is a no-op & a
// bijection.
fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
let range = high as $unsigned - low as $unsigned;
let unsigned_max: $unsigned = Int::max_value();
// this is the largest number that fits into $unsigned
// that `range` divides evenly, so, if we've sampled
// `n` uniformly from this region, then `n % range` is
// uniform in [0, range)
let zone = unsigned_max - unsigned_max % range;
Range {
low: low,
range: range as $ty,
accept_zone: zone as $ty
}
}
#[inline]
fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
loop {
// rejection sample
let v = rng.gen::<$unsigned>();
// until we find something that fits into the
// region which r.range evenly divides (this will
// be uniformly distributed)
if v < r.accept_zone as $unsigned {
// and return it, with some adjustments
return r.low + (v % r.range as $unsigned) as $ty;
}
}
}
}
}
}
integer_impl! { i8, u8 }
integer_impl! { i16, u16 }
integer_impl! { i32, u32 }
integer_impl! { i64, u64 }
integer_impl! { int, uint }
integer_impl! { u8, u8 }
integer_impl! { u16, u16 }
integer_impl! { u32, u32 }
integer_impl! { u64, u64 }
integer_impl! { uint, uint }
macro_rules! float_impl {
($ty:ty) => {
impl SampleRange for $ty {
fn construct_range(low: $ty, high: $ty) -> Range<$ty> {
Range {
low: low,
range: high - low,
accept_zone: 0.0 // unused
}
}
fn sample_range<R: Rng>(r: &Range<$ty>, rng: &mut R) -> $ty {
r.low + r.range * rng.gen()
}
}
}
}
float_impl! { f32 }
float_impl! { f64 }
#[cfg(test)]
mod tests {
use std::num::Int;
use std::prelude::v1::*;
use distributions::{Sample, IndependentSample};
use super::Range as Range;
#[should_fail]
#[test]
fn test_range_bad_limits_equal() {
Range::new(10i, 10i);
}
#[should_fail]
#[test]
fn test_range_bad_limits_flipped() {
Range::new(10i, 5i);
}
#[test]
fn test_integers() {
let mut rng = ::test::rng();
macro_rules! t {
($($ty:ty),*) => {{
$(
let v: &[($ty, $ty)] = &[(0, 10),
(10, 127),
(Int::min_value(), Int::max_value())];
for &(low, high) in v.iter() {
let mut sampler: Range<$ty> = Range::new(low, high);
for _ in range(0u, 1000) {
let v = sampler.sample(&mut rng);
assert!(low <= v && v < high);
let v = sampler.ind_sample(&mut rng);
assert!(low <= v && v < high);
}
}
)*
}}
}
t!(i8, i16, i32, i64, int,
u8, u16, u32, u64, uint)
}
#[test]
fn test_floats() {
let mut rng = ::test::rng();
macro_rules! t {
($($ty:ty),*) => {{
$(
let v: &[($ty, $ty)] = &[(0.0, 100.0),
(-1e35, -1e25),
(1e-35, 1e-25),
(-1e35, 1e35)];
for &(low, high) in v.iter() {
let mut sampler: Range<$ty> = Range::new(low, high);
for _ in range(0u, 1000) {
let v = sampler.sample(&mut rng);
assert!(low <= v && v < high);
let v = sampler.ind_sample(&mut rng);
assert!(low <= v && v < high);
}
}
)*
}}
}
t!(f32, f64)
}
}
|
new
|
identifier_name
|
serviceworkerglobalscope.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use devtools;
use devtools_traits::DevtoolScriptControlMsg;
use dom::abstractworker::WorkerScriptMsg;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding::ServiceWorkerGlobalScopeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootCollection};
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::extendableevent::ExtendableEvent;
use dom::extendablemessageevent::ExtendableMessageEvent;
use dom::globalscope::GlobalScope;
use dom::workerglobalscope::WorkerGlobalScope;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
use ipc_channel::router::ROUTER;
use js::jsapi::{JS_SetInterruptCallback, JSAutoCompartment, JSContext};
use js::jsval::UndefinedValue;
use js::rust::Runtime;
use net_traits::{load_whole_resource, IpcSend, CustomResponseMediator};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use script_runtime::{CommonScriptMsg, StackRootTLS, get_reports, new_rt_and_cx, ScriptChan};
use script_traits::{TimerEvent, WorkerGlobalScopeInit, ScopeThings, ServiceWorkerMsg, WorkerScriptLoadOrigin};
use servo_config::prefs::PREFS;
use servo_rand::random;
use servo_url::ServoUrl;
use std::sync::mpsc::{Receiver, RecvError, Select, Sender, channel};
use std::thread;
use std::time::Duration;
use style::thread_state::{self, IN_WORKER, SCRIPT};
/// Messages used to control service worker event loop
pub enum ServiceWorkerScriptMsg {
/// Message common to all workers
CommonWorker(WorkerScriptMsg),
// Message to request a custom response by the service worker
Response(CustomResponseMediator)
}
pub enum MixedMessage {
FromServiceWorker(ServiceWorkerScriptMsg),
FromDevtools(DevtoolScriptControlMsg),
FromTimeoutThread(())
}
#[derive(JSTraceable, Clone)]
pub struct ServiceWorkerChan {
pub sender: Sender<ServiceWorkerScriptMsg>
}
impl ScriptChan for ServiceWorkerChan {
fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
self.sender
.send(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)))
.map_err(|_| ())
}
fn clone(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.sender.clone(),
}
}
}
#[dom_struct]
pub struct ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
#[ignore_heap_size_of = "Defined in std"]
receiver: Receiver<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
own_sender: Sender<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
timer_event_port: Receiver<()>,
#[ignore_heap_size_of = "Defined in std"]
swmanager_sender: IpcSender<ServiceWorkerMsg>,
|
impl ServiceWorkerGlobalScope {
fn new_inherited(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(init,
worker_url,
runtime,
from_devtools_receiver,
timer_event_chan,
None),
receiver: receiver,
timer_event_port: timer_event_port,
own_sender: own_sender,
swmanager_sender: swmanager_sender,
scope_url: scope_url
}
}
#[allow(unsafe_code)]
pub fn new(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> Root<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = box ServiceWorkerGlobalScope::new_inherited(init,
worker_url,
from_devtools_receiver,
runtime,
own_sender,
receiver,
timer_event_chan,
timer_event_port,
swmanager_sender,
scope_url);
unsafe {
ServiceWorkerGlobalScopeBinding::Wrap(cx, scope)
}
}
#[allow(unsafe_code)]
pub fn run_serviceworker_scope(scope_things: ScopeThings,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
devtools_receiver: IpcReceiver<DevtoolScriptControlMsg>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl) {
let ScopeThings { script_url,
init,
worker_load_origin,
.. } = scope_things;
let serialized_worker_url = script_url.to_string();
thread::Builder::new().name(format!("ServiceWorker for {}", serialized_worker_url)).spawn(move || {
thread_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let WorkerScriptLoadOrigin { referrer_url, referrer_policy, pipeline_id } = worker_load_origin;
let request = RequestInit {
url: script_url.clone(),
type_: RequestType::Script,
destination: Destination::ServiceWorker,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: script_url,
pipeline_id: pipeline_id,
referrer_url: referrer_url,
referrer_policy: referrer_policy,
.. RequestInit::default()
};
let (url, source) = match load_whole_resource(request,
&init.resource_threads.sender()) {
Err(_) => {
println!("error loading script {}", serialized_worker_url);
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let runtime = unsafe { new_rt_and_cx() };
let (devtools_mpsc_chan, devtools_mpsc_port) = channel();
ROUTER.route_ipc_receiver_to_mpsc_sender(devtools_receiver, devtools_mpsc_chan);
// TODO XXXcreativcoder use this timer_ipc_port, when we have a service worker instance here
let (timer_ipc_chan, _timer_ipc_port) = ipc::channel().unwrap();
let (timer_chan, timer_port) = channel();
let global = ServiceWorkerGlobalScope::new(
init, url, devtools_mpsc_port, runtime,
own_sender, receiver,
timer_ipc_chan, timer_port, swmanager_sender, scope_url);
let scope = global.upcast::<WorkerGlobalScope>();
unsafe {
// Handle interrupt requests
JS_SetInterruptCallback(scope.runtime(), Some(interrupt_callback));
}
scope.execute_script(DOMString::from(source));
// Service workers are time limited
thread::Builder::new().name("SWTimeoutThread".to_owned()).spawn(move || {
let sw_lifetime_timeout = PREFS.get("dom.serviceworker.timeout_seconds").as_u64().unwrap();
thread::sleep(Duration::new(sw_lifetime_timeout, 0));
let _ = timer_chan.send(());
}).expect("Thread spawning failed");
global.dispatch_activate();
let reporter_name = format!("service-worker-reporter-{}", random::<u64>());
scope.upcast::<GlobalScope>().mem_profiler_chan().run_with_memory_reporting(|| {
// https://html.spec.whatwg.org/multipage/#event-loop-processing-model
// Step 1
while let Ok(event) = global.receive_event() {
// Step 3
                if !global.handle_event(event) {
break;
}
// Step 6
global.upcast::<WorkerGlobalScope>().perform_a_microtask_checkpoint();
}
}, reporter_name, scope.script_chan(), CommonScriptMsg::CollectReports);
}).expect("Thread spawning failed");
}
fn handle_event(&self, event: MixedMessage) -> bool {
match event {
MixedMessage::FromDevtools(msg) => {
match msg {
DevtoolScriptControlMsg::EvaluateJS(_pipe_id, string, sender) =>
devtools::handle_evaluate_js(self.upcast(), string, sender),
DevtoolScriptControlMsg::GetCachedMessages(pipe_id, message_types, sender) =>
devtools::handle_get_cached_messages(pipe_id, message_types, sender),
DevtoolScriptControlMsg::WantsLiveNotifications(_pipe_id, bool_val) =>
devtools::handle_wants_live_notifications(self.upcast(), bool_val),
_ => debug!("got an unusable devtools control message inside the worker!"),
}
true
}
MixedMessage::FromServiceWorker(msg) => {
self.handle_script_event(msg);
true
}
MixedMessage::FromTimeoutThread(_) => {
let _ = self.swmanager_sender.send(ServiceWorkerMsg::Timeout(self.scope_url.clone()));
false
}
}
}
fn handle_script_event(&self, msg: ServiceWorkerScriptMsg) {
use self::ServiceWorkerScriptMsg::*;
match msg {
CommonWorker(WorkerScriptMsg::DOMMessage(data)) => {
let scope = self.upcast::<WorkerGlobalScope>();
let target = self.upcast();
let _ac = JSAutoCompartment::new(scope.get_cx(), scope.reflector().get_jsobject().get());
rooted!(in(scope.get_cx()) let mut message = UndefinedValue());
data.read(scope.upcast(), message.handle_mut());
ExtendableMessageEvent::dispatch_jsval(target, scope.upcast(), message.handle());
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable))) => {
runnable.handler()
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan))) => {
let scope = self.upcast::<WorkerGlobalScope>();
let cx = scope.get_cx();
let path_seg = format!("url({})", scope.get_url());
let reports = get_reports(cx, path_seg);
reports_chan.send(reports);
},
Response(mediator) => {
// TODO XXXcreativcoder This will eventually use a FetchEvent interface to fire event
// when we have the Request and Response dom api's implemented
// https://slightlyoff.github.io/ServiceWorker/spec/service_worker_1/index.html#fetch-event-section
self.upcast::<EventTarget>().fire_event(atom!("fetch"));
let _ = mediator.response_chan.send(None);
}
}
}
#[allow(unsafe_code)]
fn receive_event(&self) -> Result<MixedMessage, RecvError> {
let scope = self.upcast::<WorkerGlobalScope>();
let worker_port = &self.receiver;
let devtools_port = scope.from_devtools_receiver();
let timer_event_port = &self.timer_event_port;
let sel = Select::new();
let mut worker_handle = sel.handle(worker_port);
let mut devtools_handle = sel.handle(devtools_port);
let mut timer_port_handle = sel.handle(timer_event_port);
unsafe {
worker_handle.add();
if scope.from_devtools_sender().is_some() {
devtools_handle.add();
}
timer_port_handle.add();
}
let ret = sel.wait();
if ret == worker_handle.id() {
Ok(MixedMessage::FromServiceWorker(worker_port.recv()?))
        } else if ret == devtools_handle.id() {
Ok(MixedMessage::FromDevtools(devtools_port.recv()?))
} else if ret == timer_port_handle.id() {
Ok(MixedMessage::FromTimeoutThread(timer_event_port.recv()?))
} else {
panic!("unexpected select result!")
}
}
pub fn process_event(&self, msg: CommonScriptMsg) {
self.handle_script_event(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)));
}
pub fn script_chan(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.own_sender.clone()
}
}
fn dispatch_activate(&self) {
let event = ExtendableEvent::new(self, atom!("activate"), false, false);
let event = (&*event).upcast::<Event>();
self.upcast::<EventTarget>().dispatch_event(event);
}
}
#[allow(unsafe_code)]
unsafe extern "C" fn interrupt_callback(cx: *mut JSContext) -> bool {
let worker =
Root::downcast::<WorkerGlobalScope>(GlobalScope::from_context(cx))
.expect("global is not a worker scope");
assert!(worker.is::<ServiceWorkerGlobalScope>());
// A false response causes the script to terminate
!worker.is_closing()
}
impl ServiceWorkerGlobalScopeMethods for ServiceWorkerGlobalScope {
// https://w3c.github.io/ServiceWorker/#service-worker-global-scope-onmessage-attribute
event_handler!(message, GetOnmessage, SetOnmessage);
}
|
scope_url: ServoUrl,
}
|
random_line_split
|
serviceworkerglobalscope.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use devtools;
use devtools_traits::DevtoolScriptControlMsg;
use dom::abstractworker::WorkerScriptMsg;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding::ServiceWorkerGlobalScopeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootCollection};
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::extendableevent::ExtendableEvent;
use dom::extendablemessageevent::ExtendableMessageEvent;
use dom::globalscope::GlobalScope;
use dom::workerglobalscope::WorkerGlobalScope;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
use ipc_channel::router::ROUTER;
use js::jsapi::{JS_SetInterruptCallback, JSAutoCompartment, JSContext};
use js::jsval::UndefinedValue;
use js::rust::Runtime;
use net_traits::{load_whole_resource, IpcSend, CustomResponseMediator};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use script_runtime::{CommonScriptMsg, StackRootTLS, get_reports, new_rt_and_cx, ScriptChan};
use script_traits::{TimerEvent, WorkerGlobalScopeInit, ScopeThings, ServiceWorkerMsg, WorkerScriptLoadOrigin};
use servo_config::prefs::PREFS;
use servo_rand::random;
use servo_url::ServoUrl;
use std::sync::mpsc::{Receiver, RecvError, Select, Sender, channel};
use std::thread;
use std::time::Duration;
use style::thread_state::{self, IN_WORKER, SCRIPT};
/// Messages used to control service worker event loop
pub enum ServiceWorkerScriptMsg {
/// Message common to all workers
CommonWorker(WorkerScriptMsg),
// Message to request a custom response by the service worker
Response(CustomResponseMediator)
}
pub enum MixedMessage {
FromServiceWorker(ServiceWorkerScriptMsg),
FromDevtools(DevtoolScriptControlMsg),
FromTimeoutThread(())
}
#[derive(JSTraceable, Clone)]
pub struct ServiceWorkerChan {
pub sender: Sender<ServiceWorkerScriptMsg>
}
impl ScriptChan for ServiceWorkerChan {
fn send(&self, msg: CommonScriptMsg) -> Result<(), ()>
|
fn clone(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.sender.clone(),
}
}
}
#[dom_struct]
pub struct ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
#[ignore_heap_size_of = "Defined in std"]
receiver: Receiver<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
own_sender: Sender<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
timer_event_port: Receiver<()>,
#[ignore_heap_size_of = "Defined in std"]
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
}
impl ServiceWorkerGlobalScope {
fn new_inherited(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(init,
worker_url,
runtime,
from_devtools_receiver,
timer_event_chan,
None),
receiver: receiver,
timer_event_port: timer_event_port,
own_sender: own_sender,
swmanager_sender: swmanager_sender,
scope_url: scope_url
}
}
#[allow(unsafe_code)]
pub fn new(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> Root<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = box ServiceWorkerGlobalScope::new_inherited(init,
worker_url,
from_devtools_receiver,
runtime,
own_sender,
receiver,
timer_event_chan,
timer_event_port,
swmanager_sender,
scope_url);
unsafe {
ServiceWorkerGlobalScopeBinding::Wrap(cx, scope)
}
}
#[allow(unsafe_code)]
pub fn run_serviceworker_scope(scope_things: ScopeThings,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
devtools_receiver: IpcReceiver<DevtoolScriptControlMsg>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl) {
let ScopeThings { script_url,
init,
worker_load_origin,
.. } = scope_things;
let serialized_worker_url = script_url.to_string();
thread::Builder::new().name(format!("ServiceWorker for {}", serialized_worker_url)).spawn(move || {
thread_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let WorkerScriptLoadOrigin { referrer_url, referrer_policy, pipeline_id } = worker_load_origin;
let request = RequestInit {
url: script_url.clone(),
type_: RequestType::Script,
destination: Destination::ServiceWorker,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: script_url,
pipeline_id: pipeline_id,
referrer_url: referrer_url,
referrer_policy: referrer_policy,
.. RequestInit::default()
};
let (url, source) = match load_whole_resource(request,
&init.resource_threads.sender()) {
Err(_) => {
println!("error loading script {}", serialized_worker_url);
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let runtime = unsafe { new_rt_and_cx() };
let (devtools_mpsc_chan, devtools_mpsc_port) = channel();
ROUTER.route_ipc_receiver_to_mpsc_sender(devtools_receiver, devtools_mpsc_chan);
// TODO XXXcreativcoder use this timer_ipc_port, when we have a service worker instance here
let (timer_ipc_chan, _timer_ipc_port) = ipc::channel().unwrap();
let (timer_chan, timer_port) = channel();
let global = ServiceWorkerGlobalScope::new(
init, url, devtools_mpsc_port, runtime,
own_sender, receiver,
timer_ipc_chan, timer_port, swmanager_sender, scope_url);
let scope = global.upcast::<WorkerGlobalScope>();
unsafe {
// Handle interrupt requests
JS_SetInterruptCallback(scope.runtime(), Some(interrupt_callback));
}
scope.execute_script(DOMString::from(source));
// Service workers are time limited
thread::Builder::new().name("SWTimeoutThread".to_owned()).spawn(move || {
let sw_lifetime_timeout = PREFS.get("dom.serviceworker.timeout_seconds").as_u64().unwrap();
thread::sleep(Duration::new(sw_lifetime_timeout, 0));
let _ = timer_chan.send(());
}).expect("Thread spawning failed");
global.dispatch_activate();
let reporter_name = format!("service-worker-reporter-{}", random::<u64>());
scope.upcast::<GlobalScope>().mem_profiler_chan().run_with_memory_reporting(|| {
// https://html.spec.whatwg.org/multipage/#event-loop-processing-model
// Step 1
while let Ok(event) = global.receive_event() {
// Step 3
                if !global.handle_event(event) {
break;
}
// Step 6
global.upcast::<WorkerGlobalScope>().perform_a_microtask_checkpoint();
}
}, reporter_name, scope.script_chan(), CommonScriptMsg::CollectReports);
}).expect("Thread spawning failed");
}
fn handle_event(&self, event: MixedMessage) -> bool {
match event {
MixedMessage::FromDevtools(msg) => {
match msg {
DevtoolScriptControlMsg::EvaluateJS(_pipe_id, string, sender) =>
devtools::handle_evaluate_js(self.upcast(), string, sender),
DevtoolScriptControlMsg::GetCachedMessages(pipe_id, message_types, sender) =>
devtools::handle_get_cached_messages(pipe_id, message_types, sender),
DevtoolScriptControlMsg::WantsLiveNotifications(_pipe_id, bool_val) =>
devtools::handle_wants_live_notifications(self.upcast(), bool_val),
_ => debug!("got an unusable devtools control message inside the worker!"),
}
true
}
MixedMessage::FromServiceWorker(msg) => {
self.handle_script_event(msg);
true
}
MixedMessage::FromTimeoutThread(_) => {
let _ = self.swmanager_sender.send(ServiceWorkerMsg::Timeout(self.scope_url.clone()));
false
}
}
}
fn handle_script_event(&self, msg: ServiceWorkerScriptMsg) {
use self::ServiceWorkerScriptMsg::*;
match msg {
CommonWorker(WorkerScriptMsg::DOMMessage(data)) => {
let scope = self.upcast::<WorkerGlobalScope>();
let target = self.upcast();
let _ac = JSAutoCompartment::new(scope.get_cx(), scope.reflector().get_jsobject().get());
rooted!(in(scope.get_cx()) let mut message = UndefinedValue());
data.read(scope.upcast(), message.handle_mut());
ExtendableMessageEvent::dispatch_jsval(target, scope.upcast(), message.handle());
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable))) => {
runnable.handler()
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan))) => {
let scope = self.upcast::<WorkerGlobalScope>();
let cx = scope.get_cx();
let path_seg = format!("url({})", scope.get_url());
let reports = get_reports(cx, path_seg);
reports_chan.send(reports);
},
Response(mediator) => {
// TODO XXXcreativcoder This will eventually use a FetchEvent interface to fire event
// when we have the Request and Response dom api's implemented
// https://slightlyoff.github.io/ServiceWorker/spec/service_worker_1/index.html#fetch-event-section
self.upcast::<EventTarget>().fire_event(atom!("fetch"));
let _ = mediator.response_chan.send(None);
}
}
}
#[allow(unsafe_code)]
fn receive_event(&self) -> Result<MixedMessage, RecvError> {
let scope = self.upcast::<WorkerGlobalScope>();
let worker_port = &self.receiver;
let devtools_port = scope.from_devtools_receiver();
let timer_event_port = &self.timer_event_port;
let sel = Select::new();
let mut worker_handle = sel.handle(worker_port);
let mut devtools_handle = sel.handle(devtools_port);
let mut timer_port_handle = sel.handle(timer_event_port);
unsafe {
worker_handle.add();
if scope.from_devtools_sender().is_some() {
devtools_handle.add();
}
timer_port_handle.add();
}
let ret = sel.wait();
if ret == worker_handle.id() {
Ok(MixedMessage::FromServiceWorker(worker_port.recv()?))
        } else if ret == devtools_handle.id() {
Ok(MixedMessage::FromDevtools(devtools_port.recv()?))
} else if ret == timer_port_handle.id() {
Ok(MixedMessage::FromTimeoutThread(timer_event_port.recv()?))
} else {
panic!("unexpected select result!")
}
}
pub fn process_event(&self, msg: CommonScriptMsg) {
self.handle_script_event(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)));
}
pub fn script_chan(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.own_sender.clone()
}
}
fn dispatch_activate(&self) {
let event = ExtendableEvent::new(self, atom!("activate"), false, false);
let event = (&*event).upcast::<Event>();
self.upcast::<EventTarget>().dispatch_event(event);
}
}
#[allow(unsafe_code)]
unsafe extern "C" fn interrupt_callback(cx: *mut JSContext) -> bool {
let worker =
Root::downcast::<WorkerGlobalScope>(GlobalScope::from_context(cx))
.expect("global is not a worker scope");
assert!(worker.is::<ServiceWorkerGlobalScope>());
// A false response causes the script to terminate
!worker.is_closing()
}
impl ServiceWorkerGlobalScopeMethods for ServiceWorkerGlobalScope {
// https://w3c.github.io/ServiceWorker/#service-worker-global-scope-onmessage-attribute
event_handler!(message, GetOnmessage, SetOnmessage);
}
|
{
self.sender
.send(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)))
.map_err(|_| ())
}
|
identifier_body
|
serviceworkerglobalscope.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use devtools;
use devtools_traits::DevtoolScriptControlMsg;
use dom::abstractworker::WorkerScriptMsg;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding::ServiceWorkerGlobalScopeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootCollection};
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::extendableevent::ExtendableEvent;
use dom::extendablemessageevent::ExtendableMessageEvent;
use dom::globalscope::GlobalScope;
use dom::workerglobalscope::WorkerGlobalScope;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
use ipc_channel::router::ROUTER;
use js::jsapi::{JS_SetInterruptCallback, JSAutoCompartment, JSContext};
use js::jsval::UndefinedValue;
use js::rust::Runtime;
use net_traits::{load_whole_resource, IpcSend, CustomResponseMediator};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use script_runtime::{CommonScriptMsg, StackRootTLS, get_reports, new_rt_and_cx, ScriptChan};
use script_traits::{TimerEvent, WorkerGlobalScopeInit, ScopeThings, ServiceWorkerMsg, WorkerScriptLoadOrigin};
use servo_config::prefs::PREFS;
use servo_rand::random;
use servo_url::ServoUrl;
use std::sync::mpsc::{Receiver, RecvError, Select, Sender, channel};
use std::thread;
use std::time::Duration;
use style::thread_state::{self, IN_WORKER, SCRIPT};
/// Messages used to control service worker event loop
pub enum ServiceWorkerScriptMsg {
/// Message common to all workers
CommonWorker(WorkerScriptMsg),
// Message to request a custom response by the service worker
Response(CustomResponseMediator)
}
pub enum MixedMessage {
FromServiceWorker(ServiceWorkerScriptMsg),
FromDevtools(DevtoolScriptControlMsg),
FromTimeoutThread(())
}
#[derive(JSTraceable, Clone)]
pub struct ServiceWorkerChan {
pub sender: Sender<ServiceWorkerScriptMsg>
}
impl ScriptChan for ServiceWorkerChan {
fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
self.sender
.send(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)))
.map_err(|_| ())
}
fn clone(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.sender.clone(),
}
}
}
#[dom_struct]
pub struct ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
#[ignore_heap_size_of = "Defined in std"]
receiver: Receiver<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
own_sender: Sender<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
timer_event_port: Receiver<()>,
#[ignore_heap_size_of = "Defined in std"]
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
}
impl ServiceWorkerGlobalScope {
fn new_inherited(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(init,
worker_url,
runtime,
from_devtools_receiver,
timer_event_chan,
None),
receiver: receiver,
timer_event_port: timer_event_port,
own_sender: own_sender,
swmanager_sender: swmanager_sender,
scope_url: scope_url
}
}
#[allow(unsafe_code)]
pub fn new(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> Root<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = box ServiceWorkerGlobalScope::new_inherited(init,
worker_url,
from_devtools_receiver,
runtime,
own_sender,
receiver,
timer_event_chan,
timer_event_port,
swmanager_sender,
scope_url);
unsafe {
ServiceWorkerGlobalScopeBinding::Wrap(cx, scope)
}
}
#[allow(unsafe_code)]
pub fn run_serviceworker_scope(scope_things: ScopeThings,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
devtools_receiver: IpcReceiver<DevtoolScriptControlMsg>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl) {
let ScopeThings { script_url,
init,
worker_load_origin,
.. } = scope_things;
let serialized_worker_url = script_url.to_string();
thread::Builder::new().name(format!("ServiceWorker for {}", serialized_worker_url)).spawn(move || {
thread_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let WorkerScriptLoadOrigin { referrer_url, referrer_policy, pipeline_id } = worker_load_origin;
let request = RequestInit {
url: script_url.clone(),
type_: RequestType::Script,
destination: Destination::ServiceWorker,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: script_url,
pipeline_id: pipeline_id,
referrer_url: referrer_url,
referrer_policy: referrer_policy,
.. RequestInit::default()
};
let (url, source) = match load_whole_resource(request,
&init.resource_threads.sender()) {
Err(_) => {
println!("error loading script {}", serialized_worker_url);
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let runtime = unsafe { new_rt_and_cx() };
let (devtools_mpsc_chan, devtools_mpsc_port) = channel();
ROUTER.route_ipc_receiver_to_mpsc_sender(devtools_receiver, devtools_mpsc_chan);
// TODO XXXcreativcoder use this timer_ipc_port, when we have a service worker instance here
let (timer_ipc_chan, _timer_ipc_port) = ipc::channel().unwrap();
let (timer_chan, timer_port) = channel();
let global = ServiceWorkerGlobalScope::new(
init, url, devtools_mpsc_port, runtime,
own_sender, receiver,
timer_ipc_chan, timer_port, swmanager_sender, scope_url);
let scope = global.upcast::<WorkerGlobalScope>();
unsafe {
// Handle interrupt requests
JS_SetInterruptCallback(scope.runtime(), Some(interrupt_callback));
}
scope.execute_script(DOMString::from(source));
// Service workers are time limited
thread::Builder::new().name("SWTimeoutThread".to_owned()).spawn(move || {
let sw_lifetime_timeout = PREFS.get("dom.serviceworker.timeout_seconds").as_u64().unwrap();
thread::sleep(Duration::new(sw_lifetime_timeout, 0));
let _ = timer_chan.send(());
}).expect("Thread spawning failed");
global.dispatch_activate();
let reporter_name = format!("service-worker-reporter-{}", random::<u64>());
scope.upcast::<GlobalScope>().mem_profiler_chan().run_with_memory_reporting(|| {
// https://html.spec.whatwg.org/multipage/#event-loop-processing-model
// Step 1
while let Ok(event) = global.receive_event() {
// Step 3
                if !global.handle_event(event) {
break;
}
// Step 6
global.upcast::<WorkerGlobalScope>().perform_a_microtask_checkpoint();
}
}, reporter_name, scope.script_chan(), CommonScriptMsg::CollectReports);
}).expect("Thread spawning failed");
}
fn handle_event(&self, event: MixedMessage) -> bool {
match event {
MixedMessage::FromDevtools(msg) => {
match msg {
DevtoolScriptControlMsg::EvaluateJS(_pipe_id, string, sender) =>
devtools::handle_evaluate_js(self.upcast(), string, sender),
DevtoolScriptControlMsg::GetCachedMessages(pipe_id, message_types, sender) =>
devtools::handle_get_cached_messages(pipe_id, message_types, sender),
DevtoolScriptControlMsg::WantsLiveNotifications(_pipe_id, bool_val) =>
devtools::handle_wants_live_notifications(self.upcast(), bool_val),
_ => debug!("got an unusable devtools control message inside the worker!"),
}
true
}
MixedMessage::FromServiceWorker(msg) => {
self.handle_script_event(msg);
true
}
MixedMessage::FromTimeoutThread(_) => {
let _ = self.swmanager_sender.send(ServiceWorkerMsg::Timeout(self.scope_url.clone()));
false
}
}
}
fn handle_script_event(&self, msg: ServiceWorkerScriptMsg) {
use self::ServiceWorkerScriptMsg::*;
match msg {
CommonWorker(WorkerScriptMsg::DOMMessage(data)) => {
let scope = self.upcast::<WorkerGlobalScope>();
let target = self.upcast();
let _ac = JSAutoCompartment::new(scope.get_cx(), scope.reflector().get_jsobject().get());
rooted!(in(scope.get_cx()) let mut message = UndefinedValue());
data.read(scope.upcast(), message.handle_mut());
ExtendableMessageEvent::dispatch_jsval(target, scope.upcast(), message.handle());
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable))) => {
runnable.handler()
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan))) => {
let scope = self.upcast::<WorkerGlobalScope>();
let cx = scope.get_cx();
let path_seg = format!("url({})", scope.get_url());
let reports = get_reports(cx, path_seg);
reports_chan.send(reports);
},
Response(mediator) => {
// TODO XXXcreativcoder This will eventually use a FetchEvent interface to fire event
// when we have the Request and Response dom api's implemented
// https://slightlyoff.github.io/ServiceWorker/spec/service_worker_1/index.html#fetch-event-section
self.upcast::<EventTarget>().fire_event(atom!("fetch"));
let _ = mediator.response_chan.send(None);
}
}
}
#[allow(unsafe_code)]
fn receive_event(&self) -> Result<MixedMessage, RecvError> {
let scope = self.upcast::<WorkerGlobalScope>();
let worker_port = &self.receiver;
let devtools_port = scope.from_devtools_receiver();
let timer_event_port = &self.timer_event_port;
let sel = Select::new();
let mut worker_handle = sel.handle(worker_port);
let mut devtools_handle = sel.handle(devtools_port);
let mut timer_port_handle = sel.handle(timer_event_port);
unsafe {
worker_handle.add();
if scope.from_devtools_sender().is_some() {
devtools_handle.add();
}
timer_port_handle.add();
}
let ret = sel.wait();
if ret == worker_handle.id() {
Ok(MixedMessage::FromServiceWorker(worker_port.recv()?))
        } else if ret == devtools_handle.id() {
Ok(MixedMessage::FromDevtools(devtools_port.recv()?))
} else if ret == timer_port_handle.id() {
Ok(MixedMessage::FromTimeoutThread(timer_event_port.recv()?))
} else {
panic!("unexpected select result!")
}
}
pub fn
|
(&self, msg: CommonScriptMsg) {
self.handle_script_event(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)));
}
pub fn script_chan(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.own_sender.clone()
}
}
fn dispatch_activate(&self) {
let event = ExtendableEvent::new(self, atom!("activate"), false, false);
let event = (&*event).upcast::<Event>();
self.upcast::<EventTarget>().dispatch_event(event);
}
}
#[allow(unsafe_code)]
unsafe extern "C" fn interrupt_callback(cx: *mut JSContext) -> bool {
let worker =
Root::downcast::<WorkerGlobalScope>(GlobalScope::from_context(cx))
.expect("global is not a worker scope");
assert!(worker.is::<ServiceWorkerGlobalScope>());
// A false response causes the script to terminate
!worker.is_closing()
}
impl ServiceWorkerGlobalScopeMethods for ServiceWorkerGlobalScope {
// https://w3c.github.io/ServiceWorker/#service-worker-global-scope-onmessage-attribute
event_handler!(message, GetOnmessage, SetOnmessage);
}
|
process_event
|
identifier_name
|
mod.rs
|
/// Chapter 4.23 - Closures
/// [Chapter 4.23] https://doc.rust-lang.org/book/closures.html
pub use self::closures::*;
pub mod closures {
pub fn main() {
println!("====\nClosure:\n");
// We did not need to annotate the
// types of arguments the closure
// takes or the values it returns
let plus_one = |x| x + 1;
println!("Assert check: {}", 3 == plus_one(2));
// Expression for multi-line closure
let plus_two = |x| {
let mut res: i32 = x;
res += 1;
res += 1;
res
};
println!("Assert check: {}", 4 == plus_two(2));
println!("Assert check: {}", simelar_closures());
// The environment for a closure can include bindings
        // from its enclosing scope
// It borrows the binding!
// we took ownership!
let num = 5;
let plus_num = |x: i32| x + num;
println!("Assert sum: {} {}", plus_num(10), num);
move_closures();
}
    // Similar semantics for closures and functions
fn simelar_closures() -> bool {
fn plus_one_v1 (x: i32) -> i32 { x + 1 }
let plus_one_v2 = |x: i32| -> i32 { x + 1 };
let plus_one_v3 = |x: i32| x + 1 ;
plus_one_v1(10) == plus_one_v2(10) &&
plus_one_v2(10) == plus_one_v3(10)
}
fn move_closures()
|
}
|
{
let mut num = 5;
// `num` will be changed
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
// For move closure
let mut num = 5;
{
// Move -> copied `num` value
// We took ownership of a copy
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
}
|
identifier_body
|
mod.rs
|
/// Chapter 4.23 - Closures
/// [Chapter 4.23] https://doc.rust-lang.org/book/closures.html
pub use self::closures::*;
pub mod closures {
pub fn main() {
println!("====\nClosure:\n");
// We did not need to annotate the
// types of arguments the closure
// takes or the values it returns
let plus_one = |x| x + 1;
println!("Assert check: {}", 3 == plus_one(2));
// Expression for multi-line closure
let plus_two = |x| {
|
res += 1;
res
};
println!("Assert check: {}", 4 == plus_two(2));
println!("Assert check: {}", simelar_closures());
// The environment for a closure can include bindings
        // from its enclosing scope
// It borrows the binding!
// we took ownership!
let num = 5;
let plus_num = |x: i32| x + num;
println!("Assert sum: {} {}", plus_num(10), num);
move_closures();
}
    // Similar semantics for closures and functions
fn simelar_closures() -> bool {
fn plus_one_v1 (x: i32) -> i32 { x + 1 }
let plus_one_v2 = |x: i32| -> i32 { x + 1 };
let plus_one_v3 = |x: i32| x + 1 ;
plus_one_v1(10) == plus_one_v2(10) &&
plus_one_v2(10) == plus_one_v3(10)
}
fn move_closures() {
let mut num = 5;
// `num` will be changed
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
// For move closure
let mut num = 5;
{
// Move -> copied `num` value
// We took ownership of a copy
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
}
}
|
let mut res: i32 = x;
res += 1;
|
random_line_split
|
mod.rs
|
/// Chapter 4.23 - Closures
/// [Chapter 4.23] https://doc.rust-lang.org/book/closures.html
pub use self::closures::*;
pub mod closures {
pub fn main() {
println!("====\nClosure:\n");
// We did not need to annotate the
// types of arguments the closure
// takes or the values it returns
let plus_one = |x| x + 1;
println!("Assert check: {}", 3 == plus_one(2));
// Expression for multi-line closure
let plus_two = |x| {
let mut res: i32 = x;
res += 1;
res += 1;
res
};
println!("Assert check: {}", 4 == plus_two(2));
println!("Assert check: {}", simelar_closures());
// The environment for a closure can include bindings
        // from its enclosing scope
// It borrows the binding!
// we took ownership!
let num = 5;
let plus_num = |x: i32| x + num;
println!("Assert sum: {} {}", plus_num(10), num);
move_closures();
}
    // Similar semantics for closures and functions
fn simelar_closures() -> bool {
fn
|
(x: i32) -> i32 { x + 1 }
let plus_one_v2 = |x: i32| -> i32 { x + 1 };
let plus_one_v3 = |x: i32| x + 1 ;
plus_one_v1(10) == plus_one_v2(10) &&
plus_one_v2(10) == plus_one_v3(10)
}
fn move_closures() {
let mut num = 5;
// `num` will be changed
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
// For move closure
let mut num = 5;
{
// Move -> copied `num` value
// We took ownership of a copy
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
}
}
|
plus_one_v1
|
identifier_name
|
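The closures samples above contrast plain capture with `move` capture. A minimal sketch of that difference for a `Copy` type, using illustrative names that are not taken from the samples:
fn main() {
    // Plain capture: the closure borrows `base` from the enclosing scope.
    let base = 10;
    let add_base = |x: i32| x + base;
    assert_eq!(add_base(5), 15);
    // `move` capture of a Copy type: the closure stores its own copy,
    // so a later change to the original is not visible inside it.
    let mut count = 0;
    let snapshot = move |x: i32| x + count;
    count += 100;
    assert_eq!(snapshot(1), 1); // still uses the captured copy, which was 0
    assert_eq!(count, 100);
}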
update_common.rs
|
use rmpv::Value;
use utils::serialize;
use request_type_key::RequestTypeKey;
use code::Code;
use common_operation::CommonOperation;
use FIX_STR_PREFIX;
use action::Action;
use rmpv::decode::read_value;
#[derive(Debug)]
pub struct UpdateCommon {
pub space: u64,
pub index: u64,
pub operation_type: CommonOperation,
pub field_number: u8,
pub argument: Value,
pub keys: Vec<Value>,
}
impl Action for UpdateCommon {
fn get(&self) -> (RequestTypeKey, Vec<u8>)
|
}
|
{
(RequestTypeKey::Update,
serialize(Value::Map(vec![(Value::from(Code::SpaceId as u8), Value::from(self.space)),
(Value::from(Code::IndexId as u8), Value::from(self.index)),
(Value::from(Code::Key as u8),
Value::from(self.keys.clone())),
(Value::from(Code::Tuple as u8),
Value::from(vec![Value::from(vec![
read_value(
&mut &[&[FIX_STR_PREFIX][..],
&[self.operation_type as u8][..]]
.concat()[..]).unwrap(),
Value::from(self.field_number),
Value::from(self.argument.clone())
])]))])))
}
|
identifier_body
|
update_common.rs
|
use rmpv::Value;
use utils::serialize;
use request_type_key::RequestTypeKey;
use code::Code;
use common_operation::CommonOperation;
use FIX_STR_PREFIX;
use action::Action;
use rmpv::decode::read_value;
#[derive(Debug)]
pub struct
|
{
pub space: u64,
pub index: u64,
pub operation_type: CommonOperation,
pub field_number: u8,
pub argument: Value,
pub keys: Vec<Value>,
}
impl Action for UpdateCommon {
fn get(&self) -> (RequestTypeKey, Vec<u8>) {
(RequestTypeKey::Update,
serialize(Value::Map(vec![(Value::from(Code::SpaceId as u8), Value::from(self.space)),
(Value::from(Code::IndexId as u8), Value::from(self.index)),
(Value::from(Code::Key as u8),
Value::from(self.keys.clone())),
(Value::from(Code::Tuple as u8),
Value::from(vec![Value::from(vec![
read_value(
&mut &[&[FIX_STR_PREFIX][..],
&[self.operation_type as u8][..]]
.concat()[..]).unwrap(),
Value::from(self.field_number),
Value::from(self.argument.clone())
])]))])))
}
}
|
UpdateCommon
|
identifier_name
|
update_common.rs
|
use rmpv::Value;
use utils::serialize;
use request_type_key::RequestTypeKey;
use code::Code;
use common_operation::CommonOperation;
use FIX_STR_PREFIX;
use action::Action;
use rmpv::decode::read_value;
#[derive(Debug)]
pub struct UpdateCommon {
pub space: u64,
pub index: u64,
pub operation_type: CommonOperation,
|
pub field_number: u8,
pub argument: Value,
pub keys: Vec<Value>,
}
impl Action for UpdateCommon {
fn get(&self) -> (RequestTypeKey, Vec<u8>) {
(RequestTypeKey::Update,
serialize(Value::Map(vec![(Value::from(Code::SpaceId as u8), Value::from(self.space)),
(Value::from(Code::IndexId as u8), Value::from(self.index)),
(Value::from(Code::Key as u8),
Value::from(self.keys.clone())),
(Value::from(Code::Tuple as u8),
Value::from(vec![Value::from(vec![
read_value(
&mut &[&[FIX_STR_PREFIX][..],
&[self.operation_type as u8][..]]
.concat()[..]).unwrap(),
Value::from(self.field_number),
Value::from(self.argument.clone())
])]))])))
}
}
|
random_line_split
|
|
scoping_rules_borrowing_aliasing.rs
|
struct Point { x: i32, y: i32, z: i32 }
pub fn main() {
let mut point = Point { x: 0, y: 0, z: 0 };
{
let borrowed_point = &point;
let another_borrow = &point;
// Data can be accessed via the references and the original owner
println!("Point has coordinates: ({}, {}, {})",
borrowed_point.x, another_borrow.y, point.z);
// Error! Can't borrow point as mutable because it's currently
// borrowed as immutable.
//let mutable_borrow = &mut point;
// TODO ^ Try uncommenting this line
// Immutable references go out of scope
|
// Change data via mutable reference
mutable_borrow.x = 5;
mutable_borrow.y = 2;
mutable_borrow.z = 1;
// Error! Can't borrow `point` as immutable because it's currently
// borrowed as mutable.
//let mutable_borrow = &point;
// Error! Can't borrow `point` as immutable because it's currently
// borrowed as mutable.
//let y = &point.y;
// TODO ^ Try uncommenting this line
// Error! Can't print because `println!` takes an immutable reference.
//println!("Point Z coordinate is {}", point.z);
// TODO ^ Try uncommenting this line
// Ok! Mutable references can be passed as immutable to `println!`
println!("Point has coordinates: ({}, {}, {})",
mutable_borrow.x, mutable_borrow.y, mutable_borrow.z);
// Mutable reference goes out of scope
}
// Immutable references to point are allowed again
let borrowed_point = &point;
println!("Point now has coordinates: ({}, {}, {})",
borrowed_point.x, borrowed_point.y, borrowed_point.z);
}
|
}
{
let mutable_borrow = &mut point;
|
random_line_split
|
scoping_rules_borrowing_aliasing.rs
|
struct
|
{ x: i32, y: i32, z: i32 }
pub fn main() {
let mut point = Point { x: 0, y: 0, z: 0 };
{
let borrowed_point = &point;
let another_borrow = &point;
// Data can be accessed via the references and the original owner
println!("Point has coordinates: ({}, {}, {})",
borrowed_point.x, another_borrow.y, point.z);
// Error! Can't borrow point as mutable because it's currently
// borrowed as immutable.
//let mutable_borrow = &mut point;
// TODO ^ Try uncommenting this line
// Immutable references go out of scope
}
{
let mutable_borrow = &mut point;
// Change data via mutable reference
mutable_borrow.x = 5;
mutable_borrow.y = 2;
mutable_borrow.z = 1;
// Error! Can't borrow `point` as immutable because it's currently
// borrowed as mutable.
//let mutable_borrow = &point;
// Error! Can't borrow `point` as immutable because it's currently
// borrowed as mutable.
//let y = &point.y;
// TODO ^ Try uncommenting this line
// Error! Can't print because `println!` takes an immutable reference.
//println!("Point Z coordinate is {}", point.z);
// TODO ^ Try uncommenting this line
// Ok! Mutable references can be passed as immutable to `println!`
println!("Point has coordinates: ({}, {}, {})",
mutable_borrow.x, mutable_borrow.y, mutable_borrow.z);
// Mutable reference goes out of scope
}
// Immutable references to point are allowed again
let borrowed_point = &point;
println!("Point now has coordinates: ({}, {}, {})",
borrowed_point.x, borrowed_point.y, borrowed_point.z);
}
|
Point
|
identifier_name
|
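The aliasing samples above end each borrow by closing a scope. Since non-lexical lifetimes, a borrow also ends at its last use, so a sketch like the following (simplified to a tuple instead of the `Point` struct) compiles on current Rust:
fn main() {
    let mut point = (0, 0, 0);
    let borrowed = &point;                // immutable borrow begins
    println!("read: {:?}", borrowed);     // ...and ends here, at its last use
    let mutable = &mut point;             // so a mutable borrow is now allowed
    mutable.0 = 5;
    println!("after write: {:?}", point); // the mutable borrow has also ended
}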
subscriber.rs
|
// Copyright (C) 2015 <Rick Richardson [email protected]>
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
use std::fmt::Display;
use reactive::{Subscriber};
use sendable::Sendable;
pub struct StdoutSubscriber<A> where A : Display {
index: Option<usize>
}
impl<A> StdoutSubscriber<A> where A : Display {
pub fn new() -> StdoutSubscriber<A> {
StdoutSubscriber {
index: None
}
}
}
impl<A> Subscriber for StdoutSubscriber<A> where A : Display {
type Input = A;
fn on_next(&mut self, t: A) -> bool {
println!("{}", t);
true
}
}
pub struct Decoupler<Q, I> where I : Send, Q : Sendable {
index: Option<usize>,
data_tx: Q,
}
impl<Q, I> Decoupler<Q, I> where I : Send, Q : Sendable {
|
Decoupler {
index: None,
data_tx: tx,
}
}
}
impl<Q, I> Subscriber for Decoupler<Q, I>
where I : Send,
Q : Sendable<Item=I>
{
type Input = I;
fn on_next(&mut self, t: I) -> bool {
//TODO better handle queue failure, maybe put the returned buf
        //into a recovery queue
match self.data_tx.send(t) {
Ok(()) => true,
Err(_) => false
}
}
}
pub struct Collect<'a, I> where I : 'a {
index: Option<usize>,
val: &'a mut Box<Vec<I>>
}
impl<'a, I> Collect<'a, I> where I : 'a {
pub fn new(v : &'a mut Box<Vec<I>>) -> Collect<'a, I> {
Collect {
index: None,
val: v
}
}
}
impl<'a, I> Subscriber for Collect<'a, I> where I : 'a {
type Input = I;
fn on_next(&mut self, t: I) -> bool {
self.val.push(t);
true
}
}
|
pub fn new(tx: Q) -> Decoupler<Q,I> {
|
random_line_split
|
subscriber.rs
|
// Copyright (C) 2015 <Rick Richardson [email protected]>
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
use std::fmt::Display;
use reactive::{Subscriber};
use sendable::Sendable;
pub struct StdoutSubscriber<A> where A : Display {
index: Option<usize>
}
impl<A> StdoutSubscriber<A> where A : Display {
pub fn new() -> StdoutSubscriber<A> {
StdoutSubscriber {
index: None
}
}
}
impl<A> Subscriber for StdoutSubscriber<A> where A : Display {
type Input = A;
fn on_next(&mut self, t: A) -> bool
|
}
pub struct Decoupler<Q, I> where I : Send, Q : Sendable {
index: Option<usize>,
data_tx: Q,
}
impl<Q, I> Decoupler<Q, I> where I : Send, Q : Sendable {
pub fn new(tx: Q) -> Decoupler<Q,I> {
Decoupler {
index: None,
data_tx: tx,
}
}
}
impl<Q, I> Subscriber for Decoupler<Q, I>
where I : Send,
Q : Sendable<Item=I>
{
type Input = I;
fn on_next(&mut self, t: I) -> bool {
//TODO better handle queue failure, maybe put the returned buf
        //into a recovery queue
match self.data_tx.send(t) {
Ok(()) => true,
Err(_) => false
}
}
}
pub struct Collect<'a, I> where I : 'a {
index: Option<usize>,
val: &'a mut Box<Vec<I>>
}
impl<'a, I> Collect<'a, I> where I : 'a {
pub fn new(v : &'a mut Box<Vec<I>>) -> Collect<'a, I> {
Collect {
index: None,
val: v
}
}
}
impl<'a, I> Subscriber for Collect<'a, I> where I : 'a {
type Input = I;
fn on_next(&mut self, t: I) -> bool {
self.val.push(t);
true
}
}
|
{
println!("{}", t);
true
}
|
identifier_body
|
subscriber.rs
|
// Copyright (C) 2015 <Rick Richardson [email protected]>
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.
use std::fmt::Display;
use reactive::{Subscriber};
use sendable::Sendable;
pub struct StdoutSubscriber<A> where A : Display {
index: Option<usize>
}
impl<A> StdoutSubscriber<A> where A : Display {
pub fn new() -> StdoutSubscriber<A> {
StdoutSubscriber {
index: None
}
}
}
impl<A> Subscriber for StdoutSubscriber<A> where A : Display {
type Input = A;
fn on_next(&mut self, t: A) -> bool {
println!("{}", t);
true
}
}
pub struct Decoupler<Q, I> where I : Send, Q : Sendable {
index: Option<usize>,
data_tx: Q,
}
impl<Q, I> Decoupler<Q, I> where I : Send, Q : Sendable {
pub fn new(tx: Q) -> Decoupler<Q,I> {
Decoupler {
index: None,
data_tx: tx,
}
}
}
impl<Q, I> Subscriber for Decoupler<Q, I>
where I : Send,
Q : Sendable<Item=I>
{
type Input = I;
fn on_next(&mut self, t: I) -> bool {
//TODO better handle queue failure, maybe put the returned buf
        //into a recovery queue
match self.data_tx.send(t) {
Ok(()) => true,
Err(_) => false
}
}
}
pub struct Collect<'a, I> where I : 'a {
index: Option<usize>,
val: &'a mut Box<Vec<I>>
}
impl<'a, I> Collect<'a, I> where I : 'a {
pub fn
|
(v : &'a mut Box<Vec<I>>) -> Collect<'a, I> {
Collect {
index: None,
val: v
}
}
}
impl<'a, I> Subscriber for Collect<'a, I> where I : 'a {
type Input = I;
fn on_next(&mut self, t: I) -> bool {
self.val.push(t);
true
}
}
|
new
|
identifier_name
|
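The `Decoupler` samples above forward each item into a queue and use the send result to decide whether the subscription stays alive. The `Subscriber` and `Sendable` traits belong to the crate in those samples; the same pattern with only the standard library's `mpsc` channel looks roughly like this:
use std::sync::mpsc;
// Push one item downstream; report `false` once the receiver is gone.
fn forward<T>(tx: &mpsc::Sender<T>, item: T) -> bool {
    tx.send(item).is_ok()
}
fn main() {
    let (tx, rx) = mpsc::channel();
    assert!(forward(&tx, 1));
    assert!(forward(&tx, 2));
    assert_eq!(rx.recv().unwrap(), 1);
    drop(rx);                  // the consumer hangs up
    assert!(!forward(&tx, 3)); // sending now fails, so the stream would stop
}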
issue_234.rs
|
use zip::result::ZipError;
const BUF: &[u8] = &[
0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0,
0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221,
221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34,
219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124,
221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0,
0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0,
234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0,
221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127,
120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213,
213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213,
213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8,
0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221,
221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0,
213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221,
221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213,
213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236,
];
#[test]
fn
|
() {
let reader = std::io::Cursor::new(&BUF);
let archive = zip::ZipArchive::new(reader);
match archive {
Err(ZipError::InvalidArchive(_)) => {}
value => panic!("Unexpected value: {:?}", value),
}
}
|
invalid_header
|
identifier_name
|
issue_234.rs
|
use zip::result::ZipError;
const BUF: &[u8] = &[
0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0,
0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221,
221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34,
219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124,
221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0,
0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0,
234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0,
221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127,
|
120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213,
213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213,
213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8,
0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221,
221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0,
213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221,
221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213,
213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236,
];
#[test]
fn invalid_header() {
let reader = std::io::Cursor::new(&BUF);
let archive = zip::ZipArchive::new(reader);
match archive {
Err(ZipError::InvalidArchive(_)) => {}
value => panic!("Unexpected value: {:?}", value),
}
}
|
random_line_split
|
|
issue_234.rs
|
use zip::result::ZipError;
const BUF: &[u8] = &[
0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0,
0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221,
221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34,
219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124,
221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0,
0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0,
234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0,
221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127,
120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213,
213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213,
213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8,
0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221,
221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0,
213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221,
221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213,
213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236,
];
#[test]
fn invalid_header() {
let reader = std::io::Cursor::new(&BUF);
let archive = zip::ZipArchive::new(reader);
match archive {
Err(ZipError::InvalidArchive(_)) =>
|
value => panic!("Unexpected value: {:?}", value),
}
}
|
{}
|
conditional_block
|
issue_234.rs
|
use zip::result::ZipError;
const BUF: &[u8] = &[
0, 80, 75, 1, 2, 127, 120, 0, 3, 3, 75, 80, 232, 3, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 7, 0, 0, 0,
0, 65, 0, 1, 0, 0, 0, 4, 0, 0, 224, 255, 0, 255, 255, 255, 255, 255, 255, 20, 39, 221, 221,
221, 221, 221, 221, 205, 221, 221, 221, 42, 221, 221, 221, 221, 221, 221, 221, 221, 38, 34, 34,
219, 80, 75, 5, 6, 0, 0, 0, 0, 5, 96, 0, 1, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 234, 236, 124,
221, 221, 37, 221, 221, 221, 221, 221, 129, 4, 0, 0, 221, 221, 80, 75, 1, 2, 127, 120, 0, 4, 0,
0, 2, 127, 120, 0, 79, 75, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0,
234, 0, 0, 0, 3, 8, 4, 232, 3, 0, 0, 0, 255, 255, 255, 255, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 3, 0,
221, 209, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 58, 58, 42, 75, 9, 2, 127,
120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213, 213, 213, 213, 213, 213,
213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213, 213,
213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 234, 0, 0, 0, 3, 8,
0, 0, 0, 12, 0, 0, 0, 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 0, 0, 58, 58, 58, 42, 175, 221, 253, 221,
221, 221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0,
213, 213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236, 221, 221,
221, 221, 221, 80, 75, 9, 2, 127, 120, 0, 99, 99, 99, 99, 99, 99, 94, 7, 0, 0, 0, 0, 0, 0, 213,
213, 213, 213, 213, 213, 213, 213, 213, 7, 0, 0, 211, 211, 211, 211, 124, 236,
];
#[test]
fn invalid_header()
|
{
let reader = std::io::Cursor::new(&BUF);
let archive = zip::ZipArchive::new(reader);
match archive {
Err(ZipError::InvalidArchive(_)) => {}
value => panic!("Unexpected value: {:?}", value),
}
}
|
identifier_body
|
|
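The `invalid_header` samples above pin down a specific error variant and panic on anything else. A dependency-free sketch of that assertion style, with a made-up `ParseError` standing in for `ZipError`:
#[derive(Debug)]
enum ParseError {
    InvalidArchive(&'static str),
}
fn parse(buf: &[u8]) -> Result<(), ParseError> {
    // Toy check standing in for real archive parsing.
    if buf.first() != Some(&b'P') {
        return Err(ParseError::InvalidArchive("bad magic number"));
    }
    Ok(())
}
fn main() {
    match parse(&[0, 1, 2]) {
        // Only this variant is the expected outcome for a corrupt buffer.
        Err(ParseError::InvalidArchive(_)) => {}
        value => panic!("Unexpected value: {:?}", value),
    }
}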
adt-tuple-struct.rs
|
// Unit test for the "user substitutions" that are annotated on each
// node.
struct SomeStruct<T>(T);
fn no_annot() {
let c = 66;
SomeStruct(&c);
}
fn annot_underscore() {
let c = 66;
SomeStruct::<_>(&c);
}
fn annot_reference_any_lifetime() {
let c = 66;
SomeStruct::<&u32>(&c);
}
fn annot_reference_static_lifetime() {
let c = 66;
SomeStruct::<&'static u32>(&c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(c: &'a u32) {
SomeStruct::<&'a u32>(c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let _closure = || {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32) {
let _closure = || {
SomeStruct::<&'a u32>(c);
|
};
}
fn main() { }
|
random_line_split
|
|
adt-tuple-struct.rs
|
// Unit test for the "user substitutions" that are annotated on each
// node.
struct SomeStruct<T>(T);
fn no_annot() {
let c = 66;
SomeStruct(&c);
}
fn annot_underscore() {
let c = 66;
SomeStruct::<_>(&c);
}
fn annot_reference_any_lifetime() {
let c = 66;
SomeStruct::<&u32>(&c);
}
fn annot_reference_static_lifetime() {
let c = 66;
SomeStruct::<&'static u32>(&c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
}
fn
|
<'a>(c: &'a u32) {
SomeStruct::<&'a u32>(c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let _closure = || {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32) {
let _closure = || {
SomeStruct::<&'a u32>(c);
};
}
fn main() { }
|
annot_reference_named_lifetime_ok
|
identifier_name
|
adt-tuple-struct.rs
|
// Unit test for the "user substitutions" that are annotated on each
// node.
struct SomeStruct<T>(T);
fn no_annot() {
let c = 66;
SomeStruct(&c);
}
fn annot_underscore() {
let c = 66;
SomeStruct::<_>(&c);
}
fn annot_reference_any_lifetime() {
let c = 66;
SomeStruct::<&u32>(&c);
}
fn annot_reference_static_lifetime() {
let c = 66;
SomeStruct::<&'static u32>(&c); //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(c: &'a u32) {
SomeStruct::<&'a u32>(c);
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let _closure = || {
let c = 66;
SomeStruct::<&'a u32>(&c); //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32)
|
fn main() { }
|
{
let _closure = || {
SomeStruct::<&'a u32>(c);
};
}
|
identifier_body
|
iterator_utils.rs
|
pub(crate) struct CartesianProductIterator<'a, I1, I2> {
v1: &'a [I1],
v2: &'a [I2],
v1_idx: usize,
v2_idx: usize
}
impl <'a, I1, I2> CartesianProductIterator<'a, I1, I2> {
pub fn new(v1: &'a [I1], v2: &'a [I2]) -> Self {
CartesianProductIterator { v1, v2, v1_idx: 0, v2_idx: 0 }
}
}
impl <'a, I1: 'a, I2> Iterator for CartesianProductIterator<'a, I1, I2> {
type Item = (&'a I1, &'a I2);
fn next(&mut self) -> Option<Self::Item> {
if self.v1.is_empty() || self.v2.is_empty() || self.v1_idx == self.v1.len() &&
self.v2_idx == self.v2.len() {
None
} else if self.v2_idx == self.v2.len()
|
else {
self.v2_idx += 1;
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
}
}
#[cfg(test)]
mod tests {
use expectest::prelude::*;
use super::CartesianProductIterator;
#[test]
fn cartesian_product_iterator_empty_array_tests() {
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&vec![1], &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &vec![1]).next()).to(be_none());
}
#[test]
fn cartesian_product_iterator_tests() {
let vec1 = vec![1];
let vec2 = vec![2];
let mut i1 = CartesianProductIterator::new(&vec1, &vec2);
expect!(i1.next()).to(be_some().value((&1, &2)));
expect!(i1.next()).to(be_none());
let vec3 = vec![1, 2];
let mut i2 = CartesianProductIterator::new(&vec3, &vec2);
expect!(i2.next()).to(be_some().value((&1, &2)));
expect!(i2.next()).to(be_some().value((&2, &2)));
expect!(i2.next()).to(be_none());
let mut i3 = CartesianProductIterator::new(&vec1, &vec3);
expect!(i3.next()).to(be_some().value((&1, &1)));
expect!(i3.next()).to(be_some().value((&1, &2)));
expect!(i3.next()).to(be_none());
let vec4 = vec![2, 3];
let mut i4 = CartesianProductIterator::new(&vec3, &vec4);
expect!(i4.next()).to(be_some().value((&1, &2)));
expect!(i4.next()).to(be_some().value((&1, &3)));
expect!(i4.next()).to(be_some().value((&2, &2)));
expect!(i4.next()).to(be_some().value((&2, &3)));
expect!(i4.next()).to(be_none());
}
}
|
{
self.v2_idx = 1;
self.v1_idx += 1;
if self.v1_idx == self.v1.len() {
None
} else {
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
}
|
conditional_block
|
iterator_utils.rs
|
pub(crate) struct CartesianProductIterator<'a, I1, I2> {
v1: &'a [I1],
v2: &'a [I2],
v1_idx: usize,
v2_idx: usize
}
impl <'a, I1, I2> CartesianProductIterator<'a, I1, I2> {
pub fn new(v1: &'a [I1], v2: &'a [I2]) -> Self {
CartesianProductIterator { v1, v2, v1_idx: 0, v2_idx: 0 }
}
}
impl <'a, I1: 'a, I2> Iterator for CartesianProductIterator<'a, I1, I2> {
type Item = (&'a I1, &'a I2);
fn next(&mut self) -> Option<Self::Item> {
if self.v1.is_empty() || self.v2.is_empty() || self.v1_idx == self.v1.len() &&
self.v2_idx == self.v2.len() {
None
} else if self.v2_idx == self.v2.len() {
self.v2_idx = 1;
self.v1_idx += 1;
if self.v1_idx == self.v1.len() {
None
} else {
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
} else {
self.v2_idx += 1;
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
}
}
#[cfg(test)]
mod tests {
use expectest::prelude::*;
use super::CartesianProductIterator;
#[test]
fn cartesian_product_iterator_empty_array_tests() {
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&vec![1], &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &vec![1]).next()).to(be_none());
}
#[test]
|
expect!(i1.next()).to(be_some().value((&1, &2)));
expect!(i1.next()).to(be_none());
let vec3 = vec![1, 2];
let mut i2 = CartesianProductIterator::new(&vec3, &vec2);
expect!(i2.next()).to(be_some().value((&1, &2)));
expect!(i2.next()).to(be_some().value((&2, &2)));
expect!(i2.next()).to(be_none());
let mut i3 = CartesianProductIterator::new(&vec1, &vec3);
expect!(i3.next()).to(be_some().value((&1, &1)));
expect!(i3.next()).to(be_some().value((&1, &2)));
expect!(i3.next()).to(be_none());
let vec4 = vec![2, 3];
let mut i4 = CartesianProductIterator::new(&vec3, &vec4);
expect!(i4.next()).to(be_some().value((&1, &2)));
expect!(i4.next()).to(be_some().value((&1, &3)));
expect!(i4.next()).to(be_some().value((&2, &2)));
expect!(i4.next()).to(be_some().value((&2, &3)));
expect!(i4.next()).to(be_none());
}
}
|
fn cartesian_product_iterator_tests() {
let vec1 = vec![1];
let vec2 = vec![2];
let mut i1 = CartesianProductIterator::new(&vec1, &vec2);
|
random_line_split
|
iterator_utils.rs
|
pub(crate) struct CartesianProductIterator<'a, I1, I2> {
v1: &'a [I1],
v2: &'a [I2],
v1_idx: usize,
v2_idx: usize
}
impl <'a, I1, I2> CartesianProductIterator<'a, I1, I2> {
pub fn new(v1: &'a [I1], v2: &'a [I2]) -> Self {
CartesianProductIterator { v1, v2, v1_idx: 0, v2_idx: 0 }
}
}
impl <'a, I1: 'a, I2> Iterator for CartesianProductIterator<'a, I1, I2> {
type Item = (&'a I1, &'a I2);
fn
|
(&mut self) -> Option<Self::Item> {
if self.v1.is_empty() || self.v2.is_empty() || self.v1_idx == self.v1.len() &&
self.v2_idx == self.v2.len() {
None
} else if self.v2_idx == self.v2.len() {
self.v2_idx = 1;
self.v1_idx += 1;
if self.v1_idx == self.v1.len() {
None
} else {
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
} else {
self.v2_idx += 1;
Some((self.v1.get(self.v1_idx).unwrap(), self.v2.get(self.v2_idx - 1).unwrap()))
}
}
}
#[cfg(test)]
mod tests {
use expectest::prelude::*;
use super::CartesianProductIterator;
#[test]
fn cartesian_product_iterator_empty_array_tests() {
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&vec![1], &Vec::<usize>::new()).next()).to(be_none());
expect!(CartesianProductIterator::new(&Vec::<usize>::new(), &vec![1]).next()).to(be_none());
}
#[test]
fn cartesian_product_iterator_tests() {
let vec1 = vec![1];
let vec2 = vec![2];
let mut i1 = CartesianProductIterator::new(&vec1, &vec2);
expect!(i1.next()).to(be_some().value((&1, &2)));
expect!(i1.next()).to(be_none());
let vec3 = vec![1, 2];
let mut i2 = CartesianProductIterator::new(&vec3, &vec2);
expect!(i2.next()).to(be_some().value((&1, &2)));
expect!(i2.next()).to(be_some().value((&2, &2)));
expect!(i2.next()).to(be_none());
let mut i3 = CartesianProductIterator::new(&vec1, &vec3);
expect!(i3.next()).to(be_some().value((&1, &1)));
expect!(i3.next()).to(be_some().value((&1, &2)));
expect!(i3.next()).to(be_none());
let vec4 = vec![2, 3];
let mut i4 = CartesianProductIterator::new(&vec3, &vec4);
expect!(i4.next()).to(be_some().value((&1, &2)));
expect!(i4.next()).to(be_some().value((&1, &3)));
expect!(i4.next()).to(be_some().value((&2, &2)));
expect!(i4.next()).to(be_some().value((&2, &3)));
expect!(i4.next()).to(be_none());
}
}
|
next
|
identifier_name
|
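The `CartesianProductIterator` samples above track two indices by hand. The same pairs, in the same order and with the same behaviour for empty inputs, can be produced from standard iterator adapters; a sketch:
// Yields every (a, b) pair with the first slice varying slowest.
fn cartesian<'a, A, B>(v1: &'a [A], v2: &'a [B]) -> impl Iterator<Item = (&'a A, &'a B)> {
    v1.iter().flat_map(move |a| v2.iter().map(move |b| (a, b)))
}
fn main() {
    let left = [1, 2];
    let right = [2, 3];
    let pairs: Vec<_> = cartesian(&left, &right).collect();
    assert_eq!(pairs, vec![(&1, &2), (&1, &3), (&2, &2), (&2, &3)]);
    // An empty side simply yields no pairs.
    let empty: [i32; 0] = [];
    assert_eq!(cartesian(&left, &empty).count(), 0);
}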
font.rs
|
use std::fmt;
use crossfont::Size as FontSize;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer};
use alacritty_config_derive::ConfigDeserialize;
use crate::config::ui_config::Delta;
/// Font config.
///
/// Defaults are provided at the level of this struct per platform, but not per
/// field in this struct. It might be nice in the future to have defaults for
/// each value independently. Alternatively, maybe erroring when the user
/// doesn't provide complete config is Ok.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct Font {
/// Extra spacing per character.
pub offset: Delta<i8>,
/// Glyph offset within character cell.
pub glyph_offset: Delta<i8>,
pub use_thin_strokes: bool,
/// Normal font face.
normal: FontDescription,
/// Bold font face.
bold: SecondaryFontDescription,
/// Italic font face.
italic: SecondaryFontDescription,
/// Bold italic font face.
bold_italic: SecondaryFontDescription,
/// Font size in points.
size: Size,
/// Whether to use the built-in font for box drawing characters.
pub builtin_box_drawing: bool,
}
impl Font {
/// Get a font clone with a size modification.
|
        Font { size: Size(size), ..self }
}
#[inline]
pub fn size(&self) -> FontSize {
self.size.0
}
/// Get normal font description.
pub fn normal(&self) -> &FontDescription {
&self.normal
}
/// Get bold font description.
pub fn bold(&self) -> FontDescription {
self.bold.desc(&self.normal)
}
/// Get italic font description.
pub fn italic(&self) -> FontDescription {
self.italic.desc(&self.normal)
}
/// Get bold italic font description.
pub fn bold_italic(&self) -> FontDescription {
self.bold_italic.desc(&self.normal)
}
}
impl Default for Font {
fn default() -> Font {
Self {
builtin_box_drawing: true,
use_thin_strokes: Default::default(),
glyph_offset: Default::default(),
bold_italic: Default::default(),
italic: Default::default(),
offset: Default::default(),
normal: Default::default(),
bold: Default::default(),
size: Default::default(),
}
}
}
/// Description of the normal font.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct FontDescription {
pub family: String,
pub style: Option<String>,
}
impl Default for FontDescription {
fn default() -> FontDescription {
FontDescription {
#[cfg(not(any(target_os = "macos", windows)))]
family: "monospace".into(),
#[cfg(target_os = "macos")]
family: "Menlo".into(),
#[cfg(windows)]
family: "Consolas".into(),
style: None,
}
}
}
/// Description of the italic and bold font.
#[derive(ConfigDeserialize, Debug, Default, Clone, PartialEq, Eq)]
pub struct SecondaryFontDescription {
family: Option<String>,
style: Option<String>,
}
impl SecondaryFontDescription {
pub fn desc(&self, fallback: &FontDescription) -> FontDescription {
FontDescription {
family: self.family.clone().unwrap_or_else(|| fallback.family.clone()),
style: self.style.clone(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct Size(FontSize);
impl Default for Size {
fn default() -> Self {
Self(FontSize::new(11.))
}
}
impl<'de> Deserialize<'de> for Size {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct NumVisitor;
impl<'v> Visitor<'v> for NumVisitor {
type Value = Size;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("f64 or u64")
}
fn visit_f64<E: de::Error>(self, value: f64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
fn visit_u64<E: de::Error>(self, value: u64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
}
deserializer.deserialize_any(NumVisitor)
}
}
|
pub fn with_size(self, size: FontSize) -> Font {
|
random_line_split
|
font.rs
|
use std::fmt;
use crossfont::Size as FontSize;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer};
use alacritty_config_derive::ConfigDeserialize;
use crate::config::ui_config::Delta;
/// Font config.
///
/// Defaults are provided at the level of this struct per platform, but not per
/// field in this struct. It might be nice in the future to have defaults for
/// each value independently. Alternatively, maybe erroring when the user
/// doesn't provide complete config is Ok.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct Font {
/// Extra spacing per character.
pub offset: Delta<i8>,
/// Glyph offset within character cell.
pub glyph_offset: Delta<i8>,
pub use_thin_strokes: bool,
/// Normal font face.
normal: FontDescription,
/// Bold font face.
bold: SecondaryFontDescription,
/// Italic font face.
italic: SecondaryFontDescription,
/// Bold italic font face.
bold_italic: SecondaryFontDescription,
/// Font size in points.
size: Size,
/// Whether to use the built-in font for box drawing characters.
pub builtin_box_drawing: bool,
}
impl Font {
/// Get a font clone with a size modification.
pub fn with_size(self, size: FontSize) -> Font {
        Font { size: Size(size), ..self }
}
#[inline]
pub fn size(&self) -> FontSize {
self.size.0
}
/// Get normal font description.
pub fn normal(&self) -> &FontDescription {
&self.normal
}
/// Get bold font description.
pub fn bold(&self) -> FontDescription {
self.bold.desc(&self.normal)
}
/// Get italic font description.
pub fn italic(&self) -> FontDescription {
self.italic.desc(&self.normal)
}
/// Get bold italic font description.
pub fn bold_italic(&self) -> FontDescription {
self.bold_italic.desc(&self.normal)
}
}
impl Default for Font {
fn default() -> Font {
Self {
builtin_box_drawing: true,
use_thin_strokes: Default::default(),
glyph_offset: Default::default(),
bold_italic: Default::default(),
italic: Default::default(),
offset: Default::default(),
normal: Default::default(),
bold: Default::default(),
size: Default::default(),
}
}
}
/// Description of the normal font.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct FontDescription {
pub family: String,
pub style: Option<String>,
}
impl Default for FontDescription {
fn default() -> FontDescription {
FontDescription {
#[cfg(not(any(target_os = "macos", windows)))]
family: "monospace".into(),
#[cfg(target_os = "macos")]
family: "Menlo".into(),
#[cfg(windows)]
family: "Consolas".into(),
style: None,
}
}
}
/// Description of the italic and bold font.
#[derive(ConfigDeserialize, Debug, Default, Clone, PartialEq, Eq)]
pub struct SecondaryFontDescription {
family: Option<String>,
style: Option<String>,
}
impl SecondaryFontDescription {
pub fn desc(&self, fallback: &FontDescription) -> FontDescription {
FontDescription {
family: self.family.clone().unwrap_or_else(|| fallback.family.clone()),
style: self.style.clone(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct Size(FontSize);
impl Default for Size {
fn default() -> Self
|
}
impl<'de> Deserialize<'de> for Size {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct NumVisitor;
impl<'v> Visitor<'v> for NumVisitor {
type Value = Size;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("f64 or u64")
}
fn visit_f64<E: de::Error>(self, value: f64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
fn visit_u64<E: de::Error>(self, value: u64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
}
deserializer.deserialize_any(NumVisitor)
}
}
|
{
Self(FontSize::new(11.))
}
|
identifier_body
|
font.rs
|
use std::fmt;
use crossfont::Size as FontSize;
use serde::de::{self, Visitor};
use serde::{Deserialize, Deserializer};
use alacritty_config_derive::ConfigDeserialize;
use crate::config::ui_config::Delta;
/// Font config.
///
/// Defaults are provided at the level of this struct per platform, but not per
/// field in this struct. It might be nice in the future to have defaults for
/// each value independently. Alternatively, maybe erroring when the user
/// doesn't provide complete config is Ok.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct Font {
/// Extra spacing per character.
pub offset: Delta<i8>,
/// Glyph offset within character cell.
pub glyph_offset: Delta<i8>,
pub use_thin_strokes: bool,
/// Normal font face.
normal: FontDescription,
/// Bold font face.
bold: SecondaryFontDescription,
/// Italic font face.
italic: SecondaryFontDescription,
/// Bold italic font face.
bold_italic: SecondaryFontDescription,
/// Font size in points.
size: Size,
/// Whether to use the built-in font for box drawing characters.
pub builtin_box_drawing: bool,
}
impl Font {
/// Get a font clone with a size modification.
pub fn with_size(self, size: FontSize) -> Font {
        Font { size: Size(size), ..self }
}
#[inline]
pub fn size(&self) -> FontSize {
self.size.0
}
/// Get normal font description.
pub fn normal(&self) -> &FontDescription {
&self.normal
}
/// Get bold font description.
pub fn bold(&self) -> FontDescription {
self.bold.desc(&self.normal)
}
/// Get italic font description.
pub fn italic(&self) -> FontDescription {
self.italic.desc(&self.normal)
}
/// Get bold italic font description.
pub fn bold_italic(&self) -> FontDescription {
self.bold_italic.desc(&self.normal)
}
}
impl Default for Font {
fn default() -> Font {
Self {
builtin_box_drawing: true,
use_thin_strokes: Default::default(),
glyph_offset: Default::default(),
bold_italic: Default::default(),
italic: Default::default(),
offset: Default::default(),
normal: Default::default(),
bold: Default::default(),
size: Default::default(),
}
}
}
/// Description of the normal font.
#[derive(ConfigDeserialize, Debug, Clone, PartialEq, Eq)]
pub struct FontDescription {
pub family: String,
pub style: Option<String>,
}
impl Default for FontDescription {
fn default() -> FontDescription {
FontDescription {
#[cfg(not(any(target_os = "macos", windows)))]
family: "monospace".into(),
#[cfg(target_os = "macos")]
family: "Menlo".into(),
#[cfg(windows)]
family: "Consolas".into(),
style: None,
}
}
}
/// Description of the italic and bold font.
#[derive(ConfigDeserialize, Debug, Default, Clone, PartialEq, Eq)]
pub struct SecondaryFontDescription {
family: Option<String>,
style: Option<String>,
}
impl SecondaryFontDescription {
pub fn desc(&self, fallback: &FontDescription) -> FontDescription {
FontDescription {
family: self.family.clone().unwrap_or_else(|| fallback.family.clone()),
style: self.style.clone(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct Size(FontSize);
impl Default for Size {
fn default() -> Self {
Self(FontSize::new(11.))
}
}
impl<'de> Deserialize<'de> for Size {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct NumVisitor;
impl<'v> Visitor<'v> for NumVisitor {
type Value = Size;
fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("f64 or u64")
}
fn
|
<E: de::Error>(self, value: f64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
fn visit_u64<E: de::Error>(self, value: u64) -> Result<Self::Value, E> {
Ok(Size(FontSize::new(value as f32)))
}
}
deserializer.deserialize_any(NumVisitor)
}
}
|
visit_f64
|
identifier_name
|
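The `Size` samples above accept either an integer or a float by driving `deserialize_any` with a custom visitor. A stand-alone sketch of the same technique; it assumes the `serde` and `serde_json` crates are available and uses a made-up `Points` newtype instead of the crossfont size:
use serde::de::{self, Deserializer, Visitor};
use serde::Deserialize;
use std::fmt;
#[derive(Debug, PartialEq)]
struct Points(f32);
impl<'de> Deserialize<'de> for Points {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        struct NumVisitor;
        impl<'v> Visitor<'v> for NumVisitor {
            type Value = Points;
            fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("f64 or u64")
            }
            // Accept floating point input, e.g. `11.5`.
            fn visit_f64<E: de::Error>(self, value: f64) -> Result<Self::Value, E> {
                Ok(Points(value as f32))
            }
            // Accept integer input, e.g. `11`.
            fn visit_u64<E: de::Error>(self, value: u64) -> Result<Self::Value, E> {
                Ok(Points(value as f32))
            }
        }
        deserializer.deserialize_any(NumVisitor)
    }
}
fn main() {
    let a: Points = serde_json::from_str("11").unwrap();
    let b: Points = serde_json::from_str("11.5").unwrap();
    assert_eq!(a, Points(11.0));
    assert_eq!(b, Points(11.5));
}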
util.rs
|
use std::ffi::{CString, CStr};
use std::str;
use libc::{c_char, c_int};
use err::HdfsErr;
use native::*;
use dfs::HdfsFs;
pub fn str_to_chars(s: &str) -> *const c_char {
CString::new(s.as_bytes()).unwrap().as_ptr()
}
pub fn chars_to_str<'a>(chars: *const c_char) -> &'a str {
let slice = unsafe { CStr::from_ptr(chars) }.to_bytes();
str::from_utf8(slice).unwrap()
}
pub fn bool_to_c_int(val: bool) -> c_int {
if val
|
else { 0 }
}
/// Hdfs Utility
pub struct HdfsUtil;
/// HDFS Utility
impl HdfsUtil {
/// Copy file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn copy(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsCopy(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
/// Move file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn mv(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsMove(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
}
|
{ 1 }
|
conditional_block
|
util.rs
|
use std::ffi::{CString, CStr};
use std::str;
use libc::{c_char, c_int};
use err::HdfsErr;
use native::*;
use dfs::HdfsFs;
pub fn str_to_chars(s: &str) -> *const c_char {
CString::new(s.as_bytes()).unwrap().as_ptr()
}
pub fn chars_to_str<'a>(chars: *const c_char) -> &'a str {
let slice = unsafe { CStr::from_ptr(chars) }.to_bytes();
str::from_utf8(slice).unwrap()
}
pub fn bool_to_c_int(val: bool) -> c_int {
if val { 1 } else { 0 }
}
/// Hdfs Utility
pub struct HdfsUtil;
/// HDFS Utility
impl HdfsUtil {
/// Copy file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn copy(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsCopy(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
/// Move file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn
|
(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsMove(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
}
|
mv
|
identifier_name
|
util.rs
|
use std::ffi::{CString, CStr};
use std::str;
use libc::{c_char, c_int};
use err::HdfsErr;
use native::*;
use dfs::HdfsFs;
|
pub fn str_to_chars(s: &str) -> *const c_char {
CString::new(s.as_bytes()).unwrap().as_ptr()
}
pub fn chars_to_str<'a>(chars: *const c_char) -> &'a str {
let slice = unsafe { CStr::from_ptr(chars) }.to_bytes();
str::from_utf8(slice).unwrap()
}
pub fn bool_to_c_int(val: bool) -> c_int {
if val { 1 } else { 0 }
}
/// Hdfs Utility
pub struct HdfsUtil;
/// HDFS Utility
impl HdfsUtil {
/// Copy file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn copy(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsCopy(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
/// Move file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn mv(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsMove(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
}
|
random_line_split
|
|
util.rs
|
use std::ffi::{CString, CStr};
use std::str;
use libc::{c_char, c_int};
use err::HdfsErr;
use native::*;
use dfs::HdfsFs;
pub fn str_to_chars(s: &str) -> *const c_char
|
pub fn chars_to_str<'a>(chars: *const c_char) -> &'a str {
let slice = unsafe { CStr::from_ptr(chars) }.to_bytes();
str::from_utf8(slice).unwrap()
}
pub fn bool_to_c_int(val: bool) -> c_int {
if val { 1 } else { 0 }
}
/// Hdfs Utility
pub struct HdfsUtil;
/// HDFS Utility
impl HdfsUtil {
/// Copy file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn copy(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsCopy(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
/// Move file from one filesystem to another.
///
/// #### Params
/// * ```srcFS``` - The handle to source filesystem.
/// * ```src``` - The path of source file.
/// * ```dstFS``` - The handle to destination filesystem.
/// * ```dst``` - The path of destination file.
pub fn mv(src_fs: &HdfsFs, src: &str, dst_fs: &HdfsFs, dst: &str)
-> Result<bool, HdfsErr> {
let res = unsafe {
hdfsMove(src_fs.raw(), str_to_chars(src), dst_fs.raw(), str_to_chars(dst))
};
if res == 0 {
Ok(true)
} else {
Err(HdfsErr::Unknown)
}
}
}
|
{
CString::new(s.as_bytes()).unwrap().as_ptr()
}
|
identifier_body
|
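One thing worth flagging in the `str_to_chars` samples above: returning `CString::new(..).unwrap().as_ptr()` hands back a pointer into a temporary that is dropped at the end of the expression, so the pointer dangles. A hedged sketch of a safer shape, which keeps the `CString` alive for the duration of the call that uses the pointer:
use std::ffi::CString;
use std::os::raw::c_char;
// Keep ownership of the buffer; hand out the pointer only while it lives.
fn with_c_string<R>(s: &str, f: impl FnOnce(*const c_char) -> R) -> R {
    let owned = CString::new(s).expect("no interior NUL bytes");
    f(owned.as_ptr()) // `owned` outlives the call, so the pointer stays valid here
}
fn main() {
    let len = with_c_string("hdfs://localhost:9000", |ptr| {
        // Stand-in for an FFI call such as hdfsCopy: just measure the string.
        unsafe { std::ffi::CStr::from_ptr(ptr) }.to_bytes().len()
    });
    assert_eq!(len, 21);
}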
alias_ok.rs
|
extern crate crucible;
use std::cell::Cell;
use std::mem;
use crucible::*;
use crucible::method_spec::{MethodSpec, MethodSpecBuilder, clobber_globals};
fn f(x: &Cell<u8>, y: &Cell<u8>) {
x.swap(y);
}
#[crux_test]
fn f_test() {
clobber_globals();
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
f(&x, &y);
crucible_assert!(y.get() > 0);
}
fn f_spec() -> MethodSpec {
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
let mut msb = MethodSpecBuilder::new(f);
msb.add_arg(& &x);
|
msb.gather_assumes();
// Call happens here
crucible_assert!(y.get() > 0);
msb.set_return(&());
msb.gather_asserts();
msb.finish()
}
#[crux_test]
fn use_f() {
f_spec().enable();
let a = Cell::new(u8::symbolic("a"));
let b = Cell::new(u8::symbolic("b"));
crucible_assume!(0 < a.get() && a.get() < 10);
crucible_assume!(b.get() == 0);
f(&a, &b);
crucible_assert!(0 < b.get());
crucible_assert!(b.get() < 10);
}
|
msb.add_arg(& &y);
|
random_line_split
|
alias_ok.rs
|
extern crate crucible;
use std::cell::Cell;
use std::mem;
use crucible::*;
use crucible::method_spec::{MethodSpec, MethodSpecBuilder, clobber_globals};
fn f(x: &Cell<u8>, y: &Cell<u8>) {
x.swap(y);
}
#[crux_test]
fn f_test() {
clobber_globals();
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
f(&x, &y);
crucible_assert!(y.get() > 0);
}
fn f_spec() -> MethodSpec {
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
let mut msb = MethodSpecBuilder::new(f);
msb.add_arg(& &x);
msb.add_arg(& &y);
msb.gather_assumes();
// Call happens here
crucible_assert!(y.get() > 0);
msb.set_return(&());
msb.gather_asserts();
msb.finish()
}
#[crux_test]
fn use_f()
|
{
f_spec().enable();
let a = Cell::new(u8::symbolic("a"));
let b = Cell::new(u8::symbolic("b"));
crucible_assume!(0 < a.get() && a.get() < 10);
crucible_assume!(b.get() == 0);
f(&a, &b);
crucible_assert!(0 < b.get());
crucible_assert!(b.get() < 10);
}
|
identifier_body
|
|
alias_ok.rs
|
extern crate crucible;
use std::cell::Cell;
use std::mem;
use crucible::*;
use crucible::method_spec::{MethodSpec, MethodSpecBuilder, clobber_globals};
fn
|
(x: &Cell<u8>, y: &Cell<u8>) {
x.swap(y);
}
#[crux_test]
fn f_test() {
clobber_globals();
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
f(&x, &y);
crucible_assert!(y.get() > 0);
}
fn f_spec() -> MethodSpec {
let x = Cell::new(u8::symbolic("x"));
let y = Cell::new(u8::symbolic("y"));
crucible_assume!(x.get() > 0);
let mut msb = MethodSpecBuilder::new(f);
msb.add_arg(& &x);
msb.add_arg(& &y);
msb.gather_assumes();
// Call happens here
crucible_assert!(y.get() > 0);
msb.set_return(&());
msb.gather_asserts();
msb.finish()
}
#[crux_test]
fn use_f() {
f_spec().enable();
let a = Cell::new(u8::symbolic("a"));
let b = Cell::new(u8::symbolic("b"));
crucible_assume!(0 < a.get() && a.get() < 10);
crucible_assume!(b.get() == 0);
f(&a, &b);
crucible_assert!(0 < b.get());
crucible_assert!(b.get() < 10);
}
|
f
|
identifier_name
|
util.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Common utilities for computation kernels.
use crate::array::*;
use crate::bitmap::Bitmap;
use crate::buffer::Buffer;
use crate::error::Result;
/// Applies a given binary operation, `op`, to two references to `Option<Bitmap>`'s.
///
/// This function is useful when implementing operations on higher level arrays.
pub(crate) fn apply_bin_op_to_option_bitmap<F>(
left: &Option<Bitmap>,
right: &Option<Bitmap>,
op: F,
) -> Result<Option<Buffer>>
where
F: Fn(&Buffer, &Buffer) -> Result<Buffer>,
{
match *left {
None => match *right {
None => Ok(None),
Some(ref r) => Ok(Some(r.bits.clone())),
},
Some(ref l) => match *right {
None => Ok(Some(l.bits.clone())),
Some(ref r) => Ok(Some(op(&l.bits, &r.bits)?)),
},
}
}
/// Takes/filters a list array's inner data using the offsets of the list array.
///
/// Where a list array has indices `[0,2,5,10]`, taking indices of `[2,0]` returns
/// an array of the indices `[5..10, 0..2]` and offsets `[0,5,7]` (5 elements and 2
/// elements)
pub(super) fn take_value_indices_from_list(
values: &ArrayRef,
indices: &UInt32Array,
) -> (UInt32Array, Vec<i32>) {
// TODO: benchmark this function, there might be a faster unsafe alternative
// get list array's offsets
let list: &ListArray = values.as_any().downcast_ref::<ListArray>().unwrap();
let offsets: Vec<u32> = (0..=list.len())
.map(|i| list.value_offset(i) as u32)
.collect();
let mut new_offsets = Vec::with_capacity(indices.len());
let mut values = Vec::new();
let mut current_offset = 0;
// add first offset
new_offsets.push(0);
// compute the value indices, and set offsets accordingly
for i in 0..indices.len() {
if indices.is_valid(i) {
let ix = indices.value(i) as usize;
let start = offsets[ix];
let end = offsets[ix + 1];
current_offset += (end - start) as i32;
new_offsets.push(current_offset);
// if start == end, this slot is empty
            if start != end {
// type annotation needed to guide compiler a bit
let mut offsets: Vec<Option<u32>> =
(start..end).map(|v| Some(v)).collect::<Vec<Option<u32>>>();
values.append(&mut offsets);
}
} else {
new_offsets.push(current_offset);
}
}
(UInt32Array::from(values), new_offsets)
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use crate::array::ArrayData;
use crate::datatypes::{DataType, ToByteSlice};
#[test]
fn test_apply_bin_op_to_option_bitmap() {
assert_eq!(
Ok(None),
apply_bin_op_to_option_bitmap(&None, &None, |a, b| a & b)
);
assert_eq!(
Ok(Some(Buffer::from([0b01101010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&None,
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001110]))),
apply_bin_op_to_option_bitmap(
&None,
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
}
#[test]
fn test_take_value_index_from_list()
|
Some(1),
])
.data();
assert_eq!(data, indexed.data());
}
}
|
{
let value_data = Int32Array::from((0..10).collect::<Vec<i32>>()).data();
let value_offsets = Buffer::from(&[0, 2, 5, 10].to_byte_slice());
let list_data_type = DataType::List(Box::new(DataType::Int32));
let list_data = ArrayData::builder(list_data_type.clone())
.len(3)
.add_buffer(value_offsets.clone())
.add_child_data(value_data.clone())
.build();
let array = Arc::new(ListArray::from(list_data)) as ArrayRef;
let index = UInt32Array::from(vec![2, 0]);
let (indexed, offsets) = take_value_indices_from_list(&array, &index);
assert_eq!(vec![0, 5, 7], offsets);
let data = UInt32Array::from(vec![
Some(5),
Some(6),
Some(7),
Some(8),
Some(9),
Some(0),
|
identifier_body
|
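The null-bitmap rule in `apply_bin_op_to_option_bitmap` (keep the only bitmap present, otherwise apply `op` to both) can be illustrated without the Arrow types. The sketch below is a hypothetical standalone mirror that uses plain `Vec<u8>` in place of `Bitmap`/`Buffer`; the name `combine_validity` and the byte-wise AND closure are invented for the example and are not part of the crate.

fn combine_validity<F>(
    left: &Option<Vec<u8>>,
    right: &Option<Vec<u8>>,
    op: F,
) -> Option<Vec<u8>>
where
    F: Fn(&[u8], &[u8]) -> Vec<u8>,
{
    // Mirrors the match above: None/None -> None, one side present -> clone it,
    // both present -> combine with `op`.
    match (left, right) {
        (None, None) => None,
        (Some(l), None) => Some(l.clone()),
        (None, Some(r)) => Some(r.clone()),
        (Some(l), Some(r)) => Some(op(l, r)),
    }
}

fn main() {
    // Byte-wise AND, the same closure shape as `|a, b| a & b` in the tests.
    let and = |a: &[u8], b: &[u8]| -> Vec<u8> {
        a.iter().zip(b.iter()).map(|(x, y)| x & y).collect()
    };
    let l = Some(vec![0b0110_1010u8]);
    let r = Some(vec![0b0100_1110u8]);
    assert_eq!(combine_validity(&l, &r, &and), Some(vec![0b0100_1010]));
    assert_eq!(combine_validity(&l, &None, &and), l);
    assert_eq!(combine_validity(&None, &None, &and), None);
}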
util.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Common utilities for computation kernels.
use crate::array::*;
use crate::bitmap::Bitmap;
use crate::buffer::Buffer;
use crate::error::Result;
/// Applies a given binary operation, `op`, to two references to `Option<Bitmap>`'s.
///
/// This function is useful when implementing operations on higher level arrays.
pub(crate) fn apply_bin_op_to_option_bitmap<F>(
left: &Option<Bitmap>,
right: &Option<Bitmap>,
op: F,
) -> Result<Option<Buffer>>
where
F: Fn(&Buffer, &Buffer) -> Result<Buffer>,
{
match *left {
None => match *right {
None => Ok(None),
Some(ref r) => Ok(Some(r.bits.clone())),
},
Some(ref l) => match *right {
None => Ok(Some(l.bits.clone())),
Some(ref r) => Ok(Some(op(&l.bits, &r.bits)?)),
},
}
}
/// Takes/filters a list array's inner data using the offsets of the list array.
///
/// Where a list array has indices `[0,2,5,10]`, taking indices of `[2,0]` returns
/// an array of the indices `[5..10, 0..2]` and offsets `[0,5,7]` (5 elements and 2
/// elements)
pub(super) fn take_value_indices_from_list(
values: &ArrayRef,
indices: &UInt32Array,
) -> (UInt32Array, Vec<i32>) {
// TODO: benchmark this function, there might be a faster unsafe alternative
// get list array's offsets
let list: &ListArray = values.as_any().downcast_ref::<ListArray>().unwrap();
let offsets: Vec<u32> = (0..=list.len())
.map(|i| list.value_offset(i) as u32)
.collect();
let mut new_offsets = Vec::with_capacity(indices.len());
let mut values = Vec::new();
let mut current_offset = 0;
// add first offset
new_offsets.push(0);
// compute the value indices, and set offsets accordingly
for i in 0..indices.len() {
if indices.is_valid(i) {
let ix = indices.value(i) as usize;
let start = offsets[ix];
let end = offsets[ix + 1];
current_offset += (end - start) as i32;
new_offsets.push(current_offset);
// if start == end, this slot is empty
            if start != end {
// type annotation needed to guide compiler a bit
let mut offsets: Vec<Option<u32>> =
(start..end).map(|v| Some(v)).collect::<Vec<Option<u32>>>();
values.append(&mut offsets);
}
} else {
new_offsets.push(current_offset);
}
}
(UInt32Array::from(values), new_offsets)
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use crate::array::ArrayData;
use crate::datatypes::{DataType, ToByteSlice};
#[test]
fn test_apply_bin_op_to_option_bitmap() {
assert_eq!(
Ok(None),
apply_bin_op_to_option_bitmap(&None, &None, |a, b| a & b)
);
assert_eq!(
Ok(Some(Buffer::from([0b01101010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&None,
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001110]))),
apply_bin_op_to_option_bitmap(
&None,
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
}
#[test]
fn
|
() {
let value_data = Int32Array::from((0..10).collect::<Vec<i32>>()).data();
let value_offsets = Buffer::from(&[0, 2, 5, 10].to_byte_slice());
let list_data_type = DataType::List(Box::new(DataType::Int32));
let list_data = ArrayData::builder(list_data_type.clone())
.len(3)
.add_buffer(value_offsets.clone())
.add_child_data(value_data.clone())
.build();
let array = Arc::new(ListArray::from(list_data)) as ArrayRef;
let index = UInt32Array::from(vec![2, 0]);
let (indexed, offsets) = take_value_indices_from_list(&array, &index);
assert_eq!(vec![0, 5, 7], offsets);
let data = UInt32Array::from(vec![
Some(5),
Some(6),
Some(7),
Some(8),
Some(9),
Some(0),
Some(1),
])
.data();
assert_eq!(data, indexed.data());
}
}
|
test_take_value_index_from_list
|
identifier_name
|
util.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Common utilities for computation kernels.
use crate::array::*;
use crate::bitmap::Bitmap;
use crate::buffer::Buffer;
use crate::error::Result;
/// Applies a given binary operation, `op`, to two references to `Option<Bitmap>`'s.
///
/// This function is useful when implementing operations on higher level arrays.
pub(crate) fn apply_bin_op_to_option_bitmap<F>(
left: &Option<Bitmap>,
right: &Option<Bitmap>,
op: F,
) -> Result<Option<Buffer>>
where
F: Fn(&Buffer, &Buffer) -> Result<Buffer>,
{
match *left {
None => match *right {
None => Ok(None),
Some(ref r) => Ok(Some(r.bits.clone())),
},
Some(ref l) => match *right {
None => Ok(Some(l.bits.clone())),
Some(ref r) => Ok(Some(op(&l.bits, &r.bits)?)),
},
}
}
/// Takes/filters a list array's inner data using the offsets of the list array.
///
/// Where a list array has indices `[0,2,5,10]`, taking indices of `[2,0]` returns
/// an array of the indices `[5..10, 0..2]` and offsets `[0,5,7]` (5 elements and 2
/// elements)
pub(super) fn take_value_indices_from_list(
values: &ArrayRef,
indices: &UInt32Array,
) -> (UInt32Array, Vec<i32>) {
// TODO: benchmark this function, there might be a faster unsafe alternative
// get list array's offsets
let list: &ListArray = values.as_any().downcast_ref::<ListArray>().unwrap();
let offsets: Vec<u32> = (0..=list.len())
.map(|i| list.value_offset(i) as u32)
.collect();
let mut new_offsets = Vec::with_capacity(indices.len());
let mut values = Vec::new();
let mut current_offset = 0;
// add first offset
new_offsets.push(0);
// compute the value indices, and set offsets accordingly
for i in 0..indices.len() {
if indices.is_valid(i) {
let ix = indices.value(i) as usize;
let start = offsets[ix];
let end = offsets[ix + 1];
current_offset += (end - start) as i32;
new_offsets.push(current_offset);
// if start == end, this slot is empty
            if start != end {
// type annotation needed to guide compiler a bit
let mut offsets: Vec<Option<u32>> =
(start..end).map(|v| Some(v)).collect::<Vec<Option<u32>>>();
values.append(&mut offsets);
|
}
}
(UInt32Array::from(values), new_offsets)
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use crate::array::ArrayData;
use crate::datatypes::{DataType, ToByteSlice};
#[test]
fn test_apply_bin_op_to_option_bitmap() {
assert_eq!(
Ok(None),
apply_bin_op_to_option_bitmap(&None, &None, |a, b| a & b)
);
assert_eq!(
Ok(Some(Buffer::from([0b01101010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&None,
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001110]))),
apply_bin_op_to_option_bitmap(
&None,
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
assert_eq!(
Ok(Some(Buffer::from([0b01001010]))),
apply_bin_op_to_option_bitmap(
&Some(Bitmap::from(Buffer::from([0b01101010]))),
&Some(Bitmap::from(Buffer::from([0b01001110]))),
|a, b| a & b
)
);
}
#[test]
fn test_take_value_index_from_list() {
let value_data = Int32Array::from((0..10).collect::<Vec<i32>>()).data();
let value_offsets = Buffer::from(&[0, 2, 5, 10].to_byte_slice());
let list_data_type = DataType::List(Box::new(DataType::Int32));
let list_data = ArrayData::builder(list_data_type.clone())
.len(3)
.add_buffer(value_offsets.clone())
.add_child_data(value_data.clone())
.build();
let array = Arc::new(ListArray::from(list_data)) as ArrayRef;
let index = UInt32Array::from(vec![2, 0]);
let (indexed, offsets) = take_value_indices_from_list(&array, &index);
assert_eq!(vec![0, 5, 7], offsets);
let data = UInt32Array::from(vec![
Some(5),
Some(6),
Some(7),
Some(8),
Some(9),
Some(0),
Some(1),
])
.data();
assert_eq!(data, indexed.data());
}
}
|
}
} else {
new_offsets.push(current_offset);
|
random_line_split
|
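The doc comment's example for `take_value_indices_from_list` (list offsets `[0,2,5,10]`, take indices `[2,0]`, giving value indices `5..10` then `0..2` and new offsets `[0,5,7]`) can be reproduced with plain vectors. The sketch below is a hypothetical, Arrow-free mirror of that offset walk; it ignores the null-index branch for brevity and its function name is made up for the example.

// Given list offsets and the list slots to take, return the child-value
// indices to gather plus the offsets of the resulting list array.
fn take_list_value_indices(offsets: &[u32], take: &[usize]) -> (Vec<u32>, Vec<i32>) {
    let mut value_indices = Vec::new();
    let mut new_offsets = vec![0i32];
    let mut current = 0i32;
    for &ix in take {
        let (start, end) = (offsets[ix], offsets[ix + 1]);
        current += (end - start) as i32;
        new_offsets.push(current);
        // Gather every child index covered by the taken list slot.
        value_indices.extend(start..end);
    }
    (value_indices, new_offsets)
}

fn main() {
    let (values, offsets) = take_list_value_indices(&[0, 2, 5, 10], &[2, 0]);
    assert_eq!(values, vec![5, 6, 7, 8, 9, 0, 1]);
    assert_eq!(offsets, vec![0, 5, 7]);
}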
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write
|
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn is_ok(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
{
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
|
identifier_body
|
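The `Vec<T>` impl above shows the marker-trait trick: serialization is written once for vectors, and individual value types opt in through the empty `OneOrMoreCommaSeparated` trait. The sketch below is a hypothetical self-contained version with its own minimal `ToCss` and a made-up `Px` length type, so it compiles without Servo or app_units; it mirrors only the comma-separated join and the `to_css_string` wrapper.

use std::fmt::{self, Write};

// Minimal stand-in for the trait above: write CSS syntax into any fmt::Write sink.
trait ToCss {
    fn to_css<W: Write>(&self, dest: &mut W) -> fmt::Result;
    fn to_css_string(&self) -> String {
        let mut s = String::new();
        self.to_css(&mut s).unwrap();
        s
    }
}

// Opt-in marker: only "one or more, comma separated" value lists get the Vec impl.
trait OneOrMoreCommaSeparated {}

impl<T: ToCss + OneOrMoreCommaSeparated> ToCss for Vec<T> {
    fn to_css<W: Write>(&self, dest: &mut W) -> fmt::Result {
        let mut iter = self.iter();
        iter.next().unwrap().to_css(dest)?;
        for item in iter {
            dest.write_str(", ")?;
            item.to_css(dest)?;
        }
        Ok(())
    }
}

// Hypothetical value type used only for this example.
struct Px(f64);

impl ToCss for Px {
    fn to_css<W: Write>(&self, dest: &mut W) -> fmt::Result {
        write!(dest, "{}px", self.0)
    }
}

impl OneOrMoreCommaSeparated for Px {}

fn main() {
    let lengths = vec![Px(1.0), Px(2.5)];
    assert_eq!(lengths.to_css_string(), "1px, 2.5px");
}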
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn
|
(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
is_ok
|
identifier_name
|
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
|
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn is_ok(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
|
random_line_split
|
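`define_css_keyword_enum!` ultimately expands to an enum with a cssparser-driven `parse` and a `ToCss` match over the keyword strings. The sketch below is a hand-written, hypothetical approximation of that expansion for a small `float`-like property; the cssparser input is replaced by a plain `&str` so the example stays self-contained, and the `Float` name and its keywords are invented for illustration.

// Hand-expanded sketch of what a define_css_keyword_enum!-style invocation
// produces for a small keyword property.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Float {
    Left,
    Right,
    None,
}

impl Float {
    // Case-insensitive keyword lookup, standing in for the cssparser-based parse.
    fn parse(ident: &str) -> Result<Float, ()> {
        match ident.to_ascii_lowercase().as_str() {
            "left" => Ok(Float::Left),
            "right" => Ok(Float::Right),
            "none" => Ok(Float::None),
            _ => Err(()),
        }
    }

    // Serialization side: each variant maps back to its keyword string.
    fn to_css_str(&self) -> &'static str {
        match *self {
            Float::Left => "left",
            Float::Right => "right",
            Float::None => "none",
        }
    }
}

fn main() {
    assert_eq!(Float::parse("LEFT"), Ok(Float::Left));
    assert_eq!(Float::Left.to_css_str(), "left");
}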
union.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Ben Gamari <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::iter::FromIterator;
use syntax::ast;
use syntax::ptr::P;
use syntax::ast_util::empty_generics;
use syntax::codemap::{DUMMY_SP, dummy_spanned, respan, Spanned};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use node;
use super::Builder;
use super::utils;
enum RegOrPadding<'a> {
/// A register
Reg(&'a node::Reg),
/// A given number of bytes of padding
Pad(u64)
}
/// An iterator which takes a potentially unsorted list of registers,
/// sorts them, and adds padding to make offsets correct
struct PaddedRegsIterator<'a> {
sorted_regs: &'a Vec<node::Reg>,
index: usize,
last_offset: u64,
}
impl<'a> PaddedRegsIterator<'a> {
fn new(regs: &'a mut Vec<node::Reg>) -> PaddedRegsIterator<'a> {
regs.sort_by(|r1,r2| r1.offset.cmp(&r2.offset));
PaddedRegsIterator {
sorted_regs: regs,
index: 0,
last_offset: 0,
}
}
}
impl<'a> Iterator for PaddedRegsIterator<'a> {
type Item = RegOrPadding<'a>;
fn next(&mut self) -> Option<RegOrPadding<'a>> {
if self.index >= self.sorted_regs.len() {
None
} else {
let ref reg = self.sorted_regs[self.index];
if reg.offset > self.last_offset {
let pad_length = reg.offset - self.last_offset;
self.last_offset = reg.offset;
Some(RegOrPadding::Pad(pad_length))
} else {
self.index += 1;
self.last_offset += reg.size();
Some(RegOrPadding::Reg(reg))
}
}
}
}
/// Build types for `RegUnions`
pub struct BuildUnionTypes<'a> {
builder: &'a mut Builder,
cx: &'a ExtCtxt<'a>
}
impl<'a> BuildUnionTypes<'a> {
pub fn new(builder: &'a mut Builder, cx: &'a ExtCtxt<'a>)
-> BuildUnionTypes<'a> {
BuildUnionTypes { builder: builder, cx: cx }
}
}
fn
|
(cx: &ExtCtxt, n: Spanned<u64>) -> P<ast::Expr> {
cx.expr_lit(n.span, ast::LitInt(n.node as u64, ast::UnsignedIntLit(ast::TyUs)))
}
/// Returns the type of the field representing the given register
/// within a `RegGroup` struct
fn reg_struct_type(cx: &ExtCtxt, path: &Vec<String>, reg: &node::Reg)
-> P<ast::Ty> {
let base_ty_path = cx.path_ident(reg.name.span, utils::path_ident(cx, path));
let base_ty: P<ast::Ty> = cx.ty_path(base_ty_path);
match reg.count.node {
1 => base_ty,
n =>
cx.ty(reg.count.span,
ast::TyFixedLengthVec(base_ty, expr_usize(cx, respan(reg.count.span, n as u64)))),
}
}
impl<'a> node::RegVisitor for BuildUnionTypes<'a> {
fn visit_union_reg<'b>(&'b mut self, path: &Vec<String>, reg: &'b node::Reg,
subregs: Rc<Vec<node::Reg>>) {
let items = self.build_union_type(path, reg, &*subregs);
for item in items.into_iter() {
self.builder.push_item(item);
}
}
}
impl<'a> BuildUnionTypes<'a> {
/// Produce a field for the given register in a `RegUnion` struct
fn build_reg_union_field(&self, path: &Vec<String>, reg: &node::Reg)
-> ast::StructField {
let attrs = match reg.docstring {
Some(doc) => vec!(utils::doc_attribute(self.cx, doc.node.name.as_str())),
None => Vec::new(),
};
let mut field_path = path.clone();
field_path.push(reg.name.node.clone());
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(reg.name.node.as_str()),
ast::Public),
id: ast::DUMMY_NODE_ID,
ty: reg_struct_type(self.cx, &field_path, reg),
attrs: attrs,
}
)
}
/// Build field for padding or a register
// Dummy spans allowed here because u8 doesn't come from anywhere
#[allow(dummy_span)]
fn build_pad_or_reg(&self, path: &Vec<String>, reg_or_pad: RegOrPadding,
index: usize) -> ast::StructField {
match reg_or_pad {
RegOrPadding::Reg(reg) => self.build_reg_union_field(path, reg),
RegOrPadding::Pad(length) => {
let u8_path = self.cx.path_ident(
DUMMY_SP,
self.cx.ident_of("u8"));
let u8_ty: P<ast::Ty> = self.cx.ty_path(u8_path);
let ty: P<ast::Ty> =
self.cx.ty(
DUMMY_SP,
ast::TyFixedLengthVec(u8_ty, expr_usize(self.cx, respan(DUMMY_SP, length))));
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(format!("_pad{}", index).as_str()),
ast::Inherited),
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: Vec::new(),
},
)
},
}
}
/// Build the type associated with a register group
fn build_union_type(&self, path: &Vec<String>, reg: &node::Reg,
regs: &Vec<node::Reg>) -> Vec<P<ast::Item>> {
let name = utils::path_ident(self.cx, path);
// Registers are already sorted by parser
let mut regs = regs.clone();
let mut regs2 = regs.clone();
let padded_regs = PaddedRegsIterator::new(&mut regs);
let fields =
padded_regs.enumerate().map(|(n,r)| self.build_pad_or_reg(path, r, n));
let struct_def = ast::StructDef {
fields: FromIterator::from_iter(fields),
ctor_id: None,
};
let mut attrs: Vec<ast::Attribute> = vec!(
utils::list_attribute(self.cx, "allow",
vec!("non_camel_case_types",
"dead_code",
"missing_docs"),
reg.name.span),
);
match reg.docstring {
Some(docstring) =>
attrs.push(
utils::doc_attribute(self.cx, docstring.node.name.as_str())),
None => (),
}
let struct_item = P(ast::Item {
ident: name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemStruct(P(struct_def), empty_generics()),
vis: ast::Public,
span: reg.name.span,
});
let mut full_size: u64 = 0;
//FIXME(mcoffin) - We're making this iterator twice
let padded_regs2 = PaddedRegsIterator::new(&mut regs2);
padded_regs2.enumerate().map(|(_, rp)| {
full_size += match rp {
RegOrPadding::Reg(reg) => reg.ty.size(),
RegOrPadding::Pad(s) => s,
};
}).count();
let clone_impl = quote_item!(self.cx,
impl ::core::clone::Clone for $name {
fn clone(&self) -> Self {
let mut next: $name = unsafe {
::core::mem::uninitialized()
};
unsafe {
let next_ptr: *mut $name = &mut next;
::core::intrinsics::copy(
::core::mem::transmute(self),
next_ptr,
$full_size as usize);
return next;
}
}
}
).unwrap();
let copy_impl = quote_item!(
self.cx, impl ::core::marker::Copy for $name {}).unwrap();
let item_address = reg.address;
let docstring = format!("Placement getter for register {} at address 0x{:x}",
reg.name.node,
item_address);
let doc_attr = utils::doc_attribute(self.cx, utils::intern_string(
self.cx, docstring));
let item_getter = quote_item!(self.cx,
#[allow(non_snake_case, dead_code)]
$doc_attr
pub fn $name() -> &'static $name {
unsafe { ::core::intrinsics::transmute($item_address as usize) }
}
).unwrap();
if item_address == 0 {
vec!(struct_item, clone_impl, copy_impl)
} else {
vec!(struct_item, clone_impl, copy_impl, item_getter)
}
}
}
|
expr_usize
|
identifier_name
|
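`PaddedRegsIterator` sorts registers by offset and yields a `Pad` entry whenever the next register starts past the bytes already covered. The sketch below is a hypothetical standalone mirror that uses `(offset, size)` tuples instead of `node::Reg` and collects into a `Vec` rather than implementing `Iterator`; the gap handling is the same.

#[derive(Debug, PartialEq)]
enum RegOrPadding {
    // (offset, size) of a register
    Reg(u64, u64),
    // number of padding bytes to insert before the next register
    Pad(u64),
}

// Walk registers sorted by offset and emit Pad entries wherever there is a
// gap between the end of the previous register and the start of the next.
fn pad_registers(mut regs: Vec<(u64, u64)>) -> Vec<RegOrPadding> {
    regs.sort_by_key(|&(offset, _)| offset);
    let mut out = Vec::new();
    let mut last_offset = 0u64;
    for &(offset, size) in &regs {
        if offset > last_offset {
            out.push(RegOrPadding::Pad(offset - last_offset));
        }
        out.push(RegOrPadding::Reg(offset, size));
        last_offset = offset + size;
    }
    out
}

fn main() {
    // Registers at 0x0 (4 bytes) and 0x8 (4 bytes): a 4-byte gap needs padding.
    let items = pad_registers(vec![(0x8, 4), (0x0, 4)]);
    assert_eq!(
        items,
        vec![
            RegOrPadding::Reg(0x0, 4),
            RegOrPadding::Pad(4),
            RegOrPadding::Reg(0x8, 4),
        ]
    );
}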
union.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Ben Gamari <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::iter::FromIterator;
use syntax::ast;
use syntax::ptr::P;
use syntax::ast_util::empty_generics;
use syntax::codemap::{DUMMY_SP, dummy_spanned, respan, Spanned};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use node;
use super::Builder;
use super::utils;
enum RegOrPadding<'a> {
/// A register
Reg(&'a node::Reg),
/// A given number of bytes of padding
Pad(u64)
}
/// An iterator which takes a potentially unsorted list of registers,
/// sorts them, and adds padding to make offsets correct
struct PaddedRegsIterator<'a> {
sorted_regs: &'a Vec<node::Reg>,
index: usize,
last_offset: u64,
}
impl<'a> PaddedRegsIterator<'a> {
fn new(regs: &'a mut Vec<node::Reg>) -> PaddedRegsIterator<'a> {
regs.sort_by(|r1,r2| r1.offset.cmp(&r2.offset));
PaddedRegsIterator {
sorted_regs: regs,
index: 0,
last_offset: 0,
}
}
}
impl<'a> Iterator for PaddedRegsIterator<'a> {
type Item = RegOrPadding<'a>;
fn next(&mut self) -> Option<RegOrPadding<'a>> {
if self.index >= self.sorted_regs.len() {
None
} else {
let ref reg = self.sorted_regs[self.index];
if reg.offset > self.last_offset {
let pad_length = reg.offset - self.last_offset;
self.last_offset = reg.offset;
Some(RegOrPadding::Pad(pad_length))
} else {
self.index += 1;
self.last_offset += reg.size();
Some(RegOrPadding::Reg(reg))
}
}
}
}
/// Build types for `RegUnions`
pub struct BuildUnionTypes<'a> {
builder: &'a mut Builder,
cx: &'a ExtCtxt<'a>
}
impl<'a> BuildUnionTypes<'a> {
pub fn new(builder: &'a mut Builder, cx: &'a ExtCtxt<'a>)
-> BuildUnionTypes<'a> {
BuildUnionTypes { builder: builder, cx: cx }
}
}
fn expr_usize(cx: &ExtCtxt, n: Spanned<u64>) -> P<ast::Expr> {
cx.expr_lit(n.span, ast::LitInt(n.node as u64, ast::UnsignedIntLit(ast::TyUs)))
}
/// Returns the type of the field representing the given register
/// within a `RegGroup` struct
fn reg_struct_type(cx: &ExtCtxt, path: &Vec<String>, reg: &node::Reg)
-> P<ast::Ty> {
let base_ty_path = cx.path_ident(reg.name.span, utils::path_ident(cx, path));
let base_ty: P<ast::Ty> = cx.ty_path(base_ty_path);
match reg.count.node {
1 => base_ty,
n =>
cx.ty(reg.count.span,
ast::TyFixedLengthVec(base_ty, expr_usize(cx, respan(reg.count.span, n as u64)))),
}
}
impl<'a> node::RegVisitor for BuildUnionTypes<'a> {
fn visit_union_reg<'b>(&'b mut self, path: &Vec<String>, reg: &'b node::Reg,
subregs: Rc<Vec<node::Reg>>) {
let items = self.build_union_type(path, reg, &*subregs);
for item in items.into_iter() {
self.builder.push_item(item);
}
}
}
impl<'a> BuildUnionTypes<'a> {
/// Produce a field for the given register in a `RegUnion` struct
fn build_reg_union_field(&self, path: &Vec<String>, reg: &node::Reg)
-> ast::StructField {
let attrs = match reg.docstring {
Some(doc) => vec!(utils::doc_attribute(self.cx, doc.node.name.as_str())),
None => Vec::new(),
};
let mut field_path = path.clone();
field_path.push(reg.name.node.clone());
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(reg.name.node.as_str()),
ast::Public),
id: ast::DUMMY_NODE_ID,
ty: reg_struct_type(self.cx, &field_path, reg),
attrs: attrs,
}
)
}
/// Build field for padding or a register
// Dummy spans allowed here because u8 doesn't come from anywhere
#[allow(dummy_span)]
fn build_pad_or_reg(&self, path: &Vec<String>, reg_or_pad: RegOrPadding,
index: usize) -> ast::StructField
|
},
)
},
}
}
/// Build the type associated with a register group
fn build_union_type(&self, path: &Vec<String>, reg: &node::Reg,
regs: &Vec<node::Reg>) -> Vec<P<ast::Item>> {
let name = utils::path_ident(self.cx, path);
// Registers are already sorted by parser
let mut regs = regs.clone();
let mut regs2 = regs.clone();
let padded_regs = PaddedRegsIterator::new(&mut regs);
let fields =
padded_regs.enumerate().map(|(n,r)| self.build_pad_or_reg(path, r, n));
let struct_def = ast::StructDef {
fields: FromIterator::from_iter(fields),
ctor_id: None,
};
let mut attrs: Vec<ast::Attribute> = vec!(
utils::list_attribute(self.cx, "allow",
vec!("non_camel_case_types",
"dead_code",
"missing_docs"),
reg.name.span),
);
match reg.docstring {
Some(docstring) =>
attrs.push(
utils::doc_attribute(self.cx, docstring.node.name.as_str())),
None => (),
}
let struct_item = P(ast::Item {
ident: name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemStruct(P(struct_def), empty_generics()),
vis: ast::Public,
span: reg.name.span,
});
let mut full_size: u64 = 0;
//FIXME(mcoffin) - We're making this iterator twice
let padded_regs2 = PaddedRegsIterator::new(&mut regs2);
padded_regs2.enumerate().map(|(_, rp)| {
full_size += match rp {
RegOrPadding::Reg(reg) => reg.ty.size(),
RegOrPadding::Pad(s) => s,
};
}).count();
let clone_impl = quote_item!(self.cx,
impl ::core::clone::Clone for $name {
fn clone(&self) -> Self {
let mut next: $name = unsafe {
::core::mem::uninitialized()
};
unsafe {
let next_ptr: *mut $name = &mut next;
::core::intrinsics::copy(
::core::mem::transmute(self),
next_ptr,
$full_size as usize);
return next;
}
}
}
).unwrap();
let copy_impl = quote_item!(
self.cx, impl ::core::marker::Copy for $name {}).unwrap();
let item_address = reg.address;
let docstring = format!("Placement getter for register {} at address 0x{:x}",
reg.name.node,
item_address);
let doc_attr = utils::doc_attribute(self.cx, utils::intern_string(
self.cx, docstring));
let item_getter = quote_item!(self.cx,
#[allow(non_snake_case, dead_code)]
$doc_attr
pub fn $name() -> &'static $name {
unsafe { ::core::intrinsics::transmute($item_address as usize) }
}
).unwrap();
if item_address == 0 {
vec!(struct_item, clone_impl, copy_impl)
} else {
vec!(struct_item, clone_impl, copy_impl, item_getter)
}
}
}
|
{
match reg_or_pad {
RegOrPadding::Reg(reg) => self.build_reg_union_field(path, reg),
RegOrPadding::Pad(length) => {
let u8_path = self.cx.path_ident(
DUMMY_SP,
self.cx.ident_of("u8"));
let u8_ty: P<ast::Ty> = self.cx.ty_path(u8_path);
let ty: P<ast::Ty> =
self.cx.ty(
DUMMY_SP,
ast::TyFixedLengthVec(u8_ty, expr_usize(self.cx, respan(DUMMY_SP, length))));
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(format!("_pad{}", index).as_str()),
ast::Inherited),
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: Vec::new(),
|
identifier_body
|
union.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Ben Gamari <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::iter::FromIterator;
use syntax::ast;
use syntax::ptr::P;
use syntax::ast_util::empty_generics;
use syntax::codemap::{DUMMY_SP, dummy_spanned, respan, Spanned};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use node;
use super::Builder;
use super::utils;
enum RegOrPadding<'a> {
/// A register
Reg(&'a node::Reg),
/// A given number of bytes of padding
Pad(u64)
}
/// An iterator which takes a potentially unsorted list of registers,
/// sorts them, and adds padding to make offsets correct
struct PaddedRegsIterator<'a> {
sorted_regs: &'a Vec<node::Reg>,
index: usize,
last_offset: u64,
}
impl<'a> PaddedRegsIterator<'a> {
fn new(regs: &'a mut Vec<node::Reg>) -> PaddedRegsIterator<'a> {
regs.sort_by(|r1,r2| r1.offset.cmp(&r2.offset));
PaddedRegsIterator {
sorted_regs: regs,
index: 0,
last_offset: 0,
}
}
}
impl<'a> Iterator for PaddedRegsIterator<'a> {
type Item = RegOrPadding<'a>;
fn next(&mut self) -> Option<RegOrPadding<'a>> {
if self.index >= self.sorted_regs.len() {
None
} else {
let ref reg = self.sorted_regs[self.index];
if reg.offset > self.last_offset {
let pad_length = reg.offset - self.last_offset;
self.last_offset = reg.offset;
Some(RegOrPadding::Pad(pad_length))
} else
|
}
}
}
/// Build types for `RegUnions`
pub struct BuildUnionTypes<'a> {
builder: &'a mut Builder,
cx: &'a ExtCtxt<'a>
}
impl<'a> BuildUnionTypes<'a> {
pub fn new(builder: &'a mut Builder, cx: &'a ExtCtxt<'a>)
-> BuildUnionTypes<'a> {
BuildUnionTypes { builder: builder, cx: cx }
}
}
fn expr_usize(cx: &ExtCtxt, n: Spanned<u64>) -> P<ast::Expr> {
cx.expr_lit(n.span, ast::LitInt(n.node as u64, ast::UnsignedIntLit(ast::TyUs)))
}
/// Returns the type of the field representing the given register
/// within a `RegGroup` struct
fn reg_struct_type(cx: &ExtCtxt, path: &Vec<String>, reg: &node::Reg)
-> P<ast::Ty> {
let base_ty_path = cx.path_ident(reg.name.span, utils::path_ident(cx, path));
let base_ty: P<ast::Ty> = cx.ty_path(base_ty_path);
match reg.count.node {
1 => base_ty,
n =>
cx.ty(reg.count.span,
ast::TyFixedLengthVec(base_ty, expr_usize(cx, respan(reg.count.span, n as u64)))),
}
}
impl<'a> node::RegVisitor for BuildUnionTypes<'a> {
fn visit_union_reg<'b>(&'b mut self, path: &Vec<String>, reg: &'b node::Reg,
subregs: Rc<Vec<node::Reg>>) {
let items = self.build_union_type(path, reg, &*subregs);
for item in items.into_iter() {
self.builder.push_item(item);
}
}
}
impl<'a> BuildUnionTypes<'a> {
/// Produce a field for the given register in a `RegUnion` struct
fn build_reg_union_field(&self, path: &Vec<String>, reg: &node::Reg)
-> ast::StructField {
let attrs = match reg.docstring {
Some(doc) => vec!(utils::doc_attribute(self.cx, doc.node.name.as_str())),
None => Vec::new(),
};
let mut field_path = path.clone();
field_path.push(reg.name.node.clone());
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(reg.name.node.as_str()),
ast::Public),
id: ast::DUMMY_NODE_ID,
ty: reg_struct_type(self.cx, &field_path, reg),
attrs: attrs,
}
)
}
/// Build field for padding or a register
// Dummy spans allowed here because u8 doesn't come from anywhere
#[allow(dummy_span)]
fn build_pad_or_reg(&self, path: &Vec<String>, reg_or_pad: RegOrPadding,
index: usize) -> ast::StructField {
match reg_or_pad {
RegOrPadding::Reg(reg) => self.build_reg_union_field(path, reg),
RegOrPadding::Pad(length) => {
let u8_path = self.cx.path_ident(
DUMMY_SP,
self.cx.ident_of("u8"));
let u8_ty: P<ast::Ty> = self.cx.ty_path(u8_path);
let ty: P<ast::Ty> =
self.cx.ty(
DUMMY_SP,
ast::TyFixedLengthVec(u8_ty, expr_usize(self.cx, respan(DUMMY_SP, length))));
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(format!("_pad{}", index).as_str()),
ast::Inherited),
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: Vec::new(),
},
)
},
}
}
/// Build the type associated with a register group
fn build_union_type(&self, path: &Vec<String>, reg: &node::Reg,
regs: &Vec<node::Reg>) -> Vec<P<ast::Item>> {
let name = utils::path_ident(self.cx, path);
// Registers are already sorted by parser
let mut regs = regs.clone();
let mut regs2 = regs.clone();
let padded_regs = PaddedRegsIterator::new(&mut regs);
let fields =
padded_regs.enumerate().map(|(n,r)| self.build_pad_or_reg(path, r, n));
let struct_def = ast::StructDef {
fields: FromIterator::from_iter(fields),
ctor_id: None,
};
let mut attrs: Vec<ast::Attribute> = vec!(
utils::list_attribute(self.cx, "allow",
vec!("non_camel_case_types",
"dead_code",
"missing_docs"),
reg.name.span),
);
match reg.docstring {
Some(docstring) =>
attrs.push(
utils::doc_attribute(self.cx, docstring.node.name.as_str())),
None => (),
}
let struct_item = P(ast::Item {
ident: name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemStruct(P(struct_def), empty_generics()),
vis: ast::Public,
span: reg.name.span,
});
let mut full_size: u64 = 0;
//FIXME(mcoffin) - We're making this iterator twice
let padded_regs2 = PaddedRegsIterator::new(&mut regs2);
padded_regs2.enumerate().map(|(_, rp)| {
full_size += match rp {
RegOrPadding::Reg(reg) => reg.ty.size(),
RegOrPadding::Pad(s) => s,
};
}).count();
let clone_impl = quote_item!(self.cx,
impl ::core::clone::Clone for $name {
fn clone(&self) -> Self {
let mut next: $name = unsafe {
::core::mem::uninitialized()
};
unsafe {
let next_ptr: *mut $name = &mut next;
::core::intrinsics::copy(
::core::mem::transmute(self),
next_ptr,
$full_size as usize);
return next;
}
}
}
).unwrap();
let copy_impl = quote_item!(
self.cx, impl ::core::marker::Copy for $name {}).unwrap();
let item_address = reg.address;
let docstring = format!("Placement getter for register {} at address 0x{:x}",
reg.name.node,
item_address);
let doc_attr = utils::doc_attribute(self.cx, utils::intern_string(
self.cx, docstring));
let item_getter = quote_item!(self.cx,
#[allow(non_snake_case, dead_code)]
$doc_attr
pub fn $name() -> &'static $name {
unsafe { ::core::intrinsics::transmute($item_address as usize) }
}
).unwrap();
if item_address == 0 {
vec!(struct_item, clone_impl, copy_impl)
} else {
vec!(struct_item, clone_impl, copy_impl, item_getter)
}
}
}
|
{
self.index += 1;
self.last_offset += reg.size();
Some(RegOrPadding::Reg(reg))
}
|
conditional_block
|
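The total-size pass in `build_union_type` rebuilds the padded iterator (per the FIXME) and drives `map` purely for its side effect, discarding `count()`. A hypothetical alternative folds over the padded items once; it reuses the `(offset, size)`-style `RegOrPadding` from the earlier sketch rather than the real `node::Reg` and `reg.ty.size()`.

// Sum the bytes covered by a padded register layout with a fold instead of
// a side-effecting map().count() chain.
#[derive(Debug, PartialEq)]
enum RegOrPadding {
    Reg(u64, u64), // (offset, size)
    Pad(u64),      // padding bytes
}

fn full_size(items: &[RegOrPadding]) -> u64 {
    items.iter().fold(0, |acc, item| {
        acc + match *item {
            RegOrPadding::Reg(_, size) => size,
            RegOrPadding::Pad(bytes) => bytes,
        }
    })
}

fn main() {
    let items = vec![
        RegOrPadding::Reg(0x0, 4),
        RegOrPadding::Pad(4),
        RegOrPadding::Reg(0x8, 4),
    ];
    assert_eq!(full_size(&items), 12);
}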
union.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Ben Gamari <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::iter::FromIterator;
use syntax::ast;
use syntax::ptr::P;
use syntax::ast_util::empty_generics;
use syntax::codemap::{DUMMY_SP, dummy_spanned, respan, Spanned};
use syntax::ext::base::ExtCtxt;
use syntax::ext::build::AstBuilder;
use node;
use super::Builder;
use super::utils;
enum RegOrPadding<'a> {
/// A register
Reg(&'a node::Reg),
/// A given number of bytes of padding
Pad(u64)
}
/// An iterator which takes a potentially unsorted list of registers,
/// sorts them, and adds padding to make offsets correct
struct PaddedRegsIterator<'a> {
sorted_regs: &'a Vec<node::Reg>,
index: usize,
last_offset: u64,
}
impl<'a> PaddedRegsIterator<'a> {
fn new(regs: &'a mut Vec<node::Reg>) -> PaddedRegsIterator<'a> {
regs.sort_by(|r1,r2| r1.offset.cmp(&r2.offset));
PaddedRegsIterator {
sorted_regs: regs,
index: 0,
last_offset: 0,
}
}
}
impl<'a> Iterator for PaddedRegsIterator<'a> {
type Item = RegOrPadding<'a>;
fn next(&mut self) -> Option<RegOrPadding<'a>> {
if self.index >= self.sorted_regs.len() {
None
} else {
let ref reg = self.sorted_regs[self.index];
if reg.offset > self.last_offset {
let pad_length = reg.offset - self.last_offset;
self.last_offset = reg.offset;
Some(RegOrPadding::Pad(pad_length))
} else {
self.index += 1;
self.last_offset += reg.size();
Some(RegOrPadding::Reg(reg))
}
}
}
}
/// Build types for `RegUnions`
pub struct BuildUnionTypes<'a> {
builder: &'a mut Builder,
cx: &'a ExtCtxt<'a>
}
impl<'a> BuildUnionTypes<'a> {
pub fn new(builder: &'a mut Builder, cx: &'a ExtCtxt<'a>)
-> BuildUnionTypes<'a> {
BuildUnionTypes { builder: builder, cx: cx }
}
}
fn expr_usize(cx: &ExtCtxt, n: Spanned<u64>) -> P<ast::Expr> {
cx.expr_lit(n.span, ast::LitInt(n.node as u64, ast::UnsignedIntLit(ast::TyUs)))
}
/// Returns the type of the field representing the given register
/// within a `RegGroup` struct
fn reg_struct_type(cx: &ExtCtxt, path: &Vec<String>, reg: &node::Reg)
-> P<ast::Ty> {
let base_ty_path = cx.path_ident(reg.name.span, utils::path_ident(cx, path));
let base_ty: P<ast::Ty> = cx.ty_path(base_ty_path);
match reg.count.node {
1 => base_ty,
n =>
cx.ty(reg.count.span,
ast::TyFixedLengthVec(base_ty, expr_usize(cx, respan(reg.count.span, n as u64)))),
}
}
impl<'a> node::RegVisitor for BuildUnionTypes<'a> {
fn visit_union_reg<'b>(&'b mut self, path: &Vec<String>, reg: &'b node::Reg,
subregs: Rc<Vec<node::Reg>>) {
let items = self.build_union_type(path, reg, &*subregs);
for item in items.into_iter() {
self.builder.push_item(item);
}
}
}
impl<'a> BuildUnionTypes<'a> {
/// Produce a field for the given register in a `RegUnion` struct
fn build_reg_union_field(&self, path: &Vec<String>, reg: &node::Reg)
-> ast::StructField {
let attrs = match reg.docstring {
Some(doc) => vec!(utils::doc_attribute(self.cx, doc.node.name.as_str())),
None => Vec::new(),
};
let mut field_path = path.clone();
field_path.push(reg.name.node.clone());
dummy_spanned(
ast::StructField_ {
|
self.cx.ident_of(reg.name.node.as_str()),
ast::Public),
id: ast::DUMMY_NODE_ID,
ty: reg_struct_type(self.cx, &field_path, reg),
attrs: attrs,
}
)
}
/// Build field for padding or a register
// Dummy spans allowed here because u8 doesn't come from anywhere
#[allow(dummy_span)]
fn build_pad_or_reg(&self, path: &Vec<String>, reg_or_pad: RegOrPadding,
index: usize) -> ast::StructField {
match reg_or_pad {
RegOrPadding::Reg(reg) => self.build_reg_union_field(path, reg),
RegOrPadding::Pad(length) => {
let u8_path = self.cx.path_ident(
DUMMY_SP,
self.cx.ident_of("u8"));
let u8_ty: P<ast::Ty> = self.cx.ty_path(u8_path);
let ty: P<ast::Ty> =
self.cx.ty(
DUMMY_SP,
ast::TyFixedLengthVec(u8_ty, expr_usize(self.cx, respan(DUMMY_SP, length))));
dummy_spanned(
ast::StructField_ {
kind: ast::NamedField(
self.cx.ident_of(format!("_pad{}", index).as_str()),
ast::Inherited),
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: Vec::new(),
},
)
},
}
}
/// Build the type associated with a register group
fn build_union_type(&self, path: &Vec<String>, reg: &node::Reg,
regs: &Vec<node::Reg>) -> Vec<P<ast::Item>> {
let name = utils::path_ident(self.cx, path);
// Registers are already sorted by parser
let mut regs = regs.clone();
let mut regs2 = regs.clone();
let padded_regs = PaddedRegsIterator::new(&mut regs);
let fields =
padded_regs.enumerate().map(|(n,r)| self.build_pad_or_reg(path, r, n));
let struct_def = ast::StructDef {
fields: FromIterator::from_iter(fields),
ctor_id: None,
};
let mut attrs: Vec<ast::Attribute> = vec!(
utils::list_attribute(self.cx, "allow",
vec!("non_camel_case_types",
"dead_code",
"missing_docs"),
reg.name.span),
);
match reg.docstring {
Some(docstring) =>
attrs.push(
utils::doc_attribute(self.cx, docstring.node.name.as_str())),
None => (),
}
let struct_item = P(ast::Item {
ident: name,
attrs: attrs,
id: ast::DUMMY_NODE_ID,
node: ast::ItemStruct(P(struct_def), empty_generics()),
vis: ast::Public,
span: reg.name.span,
});
let mut full_size: u64 = 0;
//FIXME(mcoffin) - We're making this iterator twice
let padded_regs2 = PaddedRegsIterator::new(&mut regs2);
padded_regs2.enumerate().map(|(_, rp)| {
full_size += match rp {
RegOrPadding::Reg(reg) => reg.ty.size(),
RegOrPadding::Pad(s) => s,
};
}).count();
let clone_impl = quote_item!(self.cx,
impl ::core::clone::Clone for $name {
fn clone(&self) -> Self {
let mut next: $name = unsafe {
::core::mem::uninitialized()
};
unsafe {
let next_ptr: *mut $name = &mut next;
::core::intrinsics::copy(
::core::mem::transmute(self),
next_ptr,
$full_size as usize);
return next;
}
}
}
).unwrap();
let copy_impl = quote_item!(
self.cx, impl ::core::marker::Copy for $name {}).unwrap();
let item_address = reg.address;
let docstring = format!("Placement getter for register {} at address 0x{:x}",
reg.name.node,
item_address);
let doc_attr = utils::doc_attribute(self.cx, utils::intern_string(
self.cx, docstring));
let item_getter = quote_item!(self.cx,
#[allow(non_snake_case, dead_code)]
$doc_attr
pub fn $name() -> &'static $name {
unsafe { ::core::intrinsics::transmute($item_address as usize) }
}
).unwrap();
if item_address == 0 {
vec!(struct_item, clone_impl, copy_impl)
} else {
vec!(struct_item, clone_impl, copy_impl, item_getter)
}
}
}
|
kind: ast::NamedField(
|
random_line_split
|
tests_v1kpdb.rs
|
use chrono::{Timelike, Local, TimeZone, Datelike};
use kpdb::v1kpdb::V1Kpdb;
use kpdb::v1error::V1KpdbError;
#[test]
fn test_new() {
// No keyfile and password should give error as result
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(), None, None);
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::PassErr),
};
// Test load at all and parameters
result = V1Kpdb::new("test/test_both.kdb".to_string(), Some("test".to_string()),
Some("test/test_key".to_string()));
assert!(result.is_ok());
let mut db = result.ok().unwrap();
assert_eq!(db.load().is_ok(), true);
assert_eq!(db.path, "test/test_both.kdb");
// Test fail of load with wrong password
result = V1Kpdb::new("test/test_password.kdb".to_string(), Some("tes".to_string()), None);
assert!(result.is_ok());
db = result.ok().unwrap();
match db.load() {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::HashErr),
};
}
#[test]
fn test_create_group_w_title_only() {
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
assert_eq!(db.create_group("test".to_string(), None, None, None).is_ok(), true);
let mut new_group = db.groups[db.groups.len() - 1].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2999, 12, 28));
assert_eq!((new_group.expire.hour(), new_group.expire.minute(), new_group.expire.second()),
|
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().id, 0);
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
#[test]
fn test_create_group_w_everything() {
let mut result = V1Kpdb::new("test/test_parsing.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
let expire = Local.ymd(2015, 2, 28).and_hms(10,10,10);
let parent = db.groups[1].clone();
println!("{}", parent.borrow().title);
let image = 2;
assert_eq!(db.create_group("test".to_string(), Some(expire), Some(image), Some(parent)).is_ok(), true);
let mut new_group = db.groups[2].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2015, 2, 28));
assert_eq!(new_group.image, 2);
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().title, "12");
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
|
(23, 59, 59));
assert_eq!(new_group.image, 0);
|
random_line_split
|
tests_v1kpdb.rs
|
use chrono::{Timelike, Local, TimeZone, Datelike};
use kpdb::v1kpdb::V1Kpdb;
use kpdb::v1error::V1KpdbError;
#[test]
fn test_new() {
// No keyfile and password should give error as result
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(), None, None);
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::PassErr),
};
// Test load at all and parameters
result = V1Kpdb::new("test/test_both.kdb".to_string(), Some("test".to_string()),
Some("test/test_key".to_string()));
assert!(result.is_ok());
let mut db = result.ok().unwrap();
assert_eq!(db.load().is_ok(), true);
assert_eq!(db.path, "test/test_both.kdb");
// Test fail of load with wrong password
result = V1Kpdb::new("test/test_password.kdb".to_string(), Some("tes".to_string()), None);
assert!(result.is_ok());
db = result.ok().unwrap();
match db.load() {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::HashErr),
};
}
#[test]
fn test_create_group_w_title_only() {
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
assert_eq!(db.create_group("test".to_string(), None, None, None).is_ok(), true);
let mut new_group = db.groups[db.groups.len() - 1].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2999, 12, 28));
assert_eq!((new_group.expire.hour(), new_group.expire.minute(), new_group.expire.second()),
(23, 59, 59));
assert_eq!(new_group.image, 0);
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().id, 0);
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
#[test]
fn
|
() {
let mut result = V1Kpdb::new("test/test_parsing.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
let expire = Local.ymd(2015, 2, 28).and_hms(10,10,10);
let parent = db.groups[1].clone();
println!("{}", parent.borrow().title);
let image = 2;
assert_eq!(db.create_group("test".to_string(), Some(expire), Some(image), Some(parent)).is_ok(), true);
let mut new_group = db.groups[2].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2015, 2, 28));
assert_eq!(new_group.image, 2);
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().title, "12");
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
|
test_create_group_w_everything
|
identifier_name
|
tests_v1kpdb.rs
|
use chrono::{Timelike, Local, TimeZone, Datelike};
use kpdb::v1kpdb::V1Kpdb;
use kpdb::v1error::V1KpdbError;
#[test]
fn test_new()
|
match db.load() {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::HashErr),
};
}
#[test]
fn test_create_group_w_title_only() {
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
assert_eq!(db.create_group("test".to_string(), None, None, None).is_ok(), true);
let mut new_group = db.groups[db.groups.len() - 1].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2999, 12, 28));
assert_eq!((new_group.expire.hour(), new_group.expire.minute(), new_group.expire.second()),
(23, 59, 59));
assert_eq!(new_group.image, 0);
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().id, 0);
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
#[test]
fn test_create_group_w_everything() {
let mut result = V1Kpdb::new("test/test_parsing.kdb".to_string(),
Some("test".to_string()), None);
match result {
Ok(ref mut e) => assert_eq!(e.load().is_ok(), true),
Err(_) => assert!(false),
};
let mut db = result.unwrap();
let num_groups_before = db.header.num_groups;
let expire = Local.ymd(2015, 2, 28).and_hms(10,10,10);
let parent = db.groups[1].clone();
println!("{}", parent.borrow().title);
let image = 2;
assert_eq!(db.create_group("test".to_string(), Some(expire), Some(image), Some(parent)).is_ok(), true);
let mut new_group = db.groups[2].borrow_mut();
assert_eq!(new_group.title, "test");
assert_eq!((new_group.expire.year(), new_group.expire.month(), new_group.expire.day()),
(2015, 2, 28));
assert_eq!(new_group.image, 2);
let parent = new_group.parent.as_mut().unwrap();
assert_eq!(parent.borrow().title, "12");
assert_eq!(db.header.num_groups, num_groups_before + 1);
}
|
{
// No keyfile and password should give error as result
let mut result = V1Kpdb::new("test/test_password.kdb".to_string(), None, None);
match result {
Ok(_) => assert!(false),
Err(e) => assert_eq!(e, V1KpdbError::PassErr),
};
// Test load at all and parameters
result = V1Kpdb::new("test/test_both.kdb".to_string(), Some("test".to_string()),
Some("test/test_key".to_string()));
assert!(result.is_ok());
let mut db = result.ok().unwrap();
assert_eq!(db.load().is_ok(), true);
assert_eq!(db.path, "test/test_both.kdb");
// Test fail of load with wrong password
result = V1Kpdb::new("test/test_password.kdb".to_string(), Some("tes".to_string()), None);
assert!(result.is_ok());
db = result.ok().unwrap();
|
identifier_body
|
subst.rs
|
pub fn erased(t: VecPerParamSpace<Ty<'tcx>>) -> Substs<'tcx>
{
Substs { types: t, regions: ErasedRegions }
}
pub fn empty() -> Substs<'tcx> {
Substs {
types: VecPerParamSpace::empty(),
regions: NonerasedRegions(VecPerParamSpace::empty()),
}
}
pub fn trans_empty() -> Substs<'tcx> {
Substs {
types: VecPerParamSpace::empty(),
regions: ErasedRegions
}
}
pub fn is_noop(&self) -> bool {
let regions_is_noop = match self.regions {
ErasedRegions => false, // may be used to canonicalize
NonerasedRegions(ref regions) => regions.is_empty(),
};
regions_is_noop && self.types.is_empty()
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
*self.types.get(ty_param_def.space, ty_param_def.index as uint)
}
pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
match self.regions {
ErasedRegions =>
false,
NonerasedRegions(ref regions) =>
regions.iter().any(|r| r.escapes_depth(depth)),
}
}
}
pub fn self_ty(&self) -> Option<Ty<'tcx>> {
self.types.get_self().map(|&t| t)
}
pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
assert!(self.self_ty().is_none());
let mut s = (*self).clone();
s.types.push(SelfSpace, self_ty);
s
}
pub fn erase_regions(self) -> Substs<'tcx> {
let Substs { types, regions: _ } = self;
Substs { types: types, regions: ErasedRegions }
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref r) => r
}
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref mut r) => r
}
}
pub fn with_method(self,
m_types: Vec<Ty<'tcx>>,
m_regions: Vec<ty::Region>)
-> Substs<'tcx>
{
let Substs { types, regions } = self;
let types = types.with_vec(FnSpace, m_types);
let regions = regions.map(m_regions,
|r, m_regions| r.with_vec(FnSpace, m_regions));
Substs { types: types, regions: regions }
}
}
impl RegionSubsts {
fn map<A, F>(self, a: A, op: F) -> RegionSubsts where
F: FnOnce(VecPerParamSpace<ty::Region>, A) -> VecPerParamSpace<ty::Region>,
{
match self {
ErasedRegions => ErasedRegions,
NonerasedRegions(r) => NonerasedRegions(op(r, a))
}
}
pub fn is_erased(&self) -> bool {
match *self {
ErasedRegions => true,
NonerasedRegions(_) => false,
}
}
}
///////////////////////////////////////////////////////////////////////////
// ParamSpace
#[derive(PartialOrd, Ord, PartialEq, Eq, Copy,
Clone, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum ParamSpace {
TypeSpace, // Type parameters attached to a type definition, trait, or impl
SelfSpace, // Self parameter on a trait
FnSpace, // Type parameters attached to a method or fn
}
impl ParamSpace {
pub fn all() -> [ParamSpace; 3] {
[TypeSpace, SelfSpace, FnSpace]
}
pub fn to_uint(self) -> uint {
match self {
TypeSpace => 0,
SelfSpace => 1,
FnSpace => 2,
}
}
pub fn from_uint(u: uint) -> ParamSpace {
match u {
0 => TypeSpace,
1 => SelfSpace,
2 => FnSpace,
_ => panic!("Invalid ParamSpace: {}", u)
}
}
}
/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)]
pub struct VecPerParamSpace<T> {
// This was originally represented as a tuple with one Vec<T> for
// each variant of ParamSpace, and that remains the abstraction
// that it provides to its clients.
//
// Here is how the representation corresponds to the abstraction
// i.e. the "abstraction function" AF:
//
// AF(self) = (self.content[..self.type_limit],
// self.content[self.type_limit..self.self_limit],
// self.content[self.self_limit..])
type_limit: uint,
self_limit: uint,
content: Vec<T>,
}
/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
pub types: Vec<T>,
pub selfs: Vec<T>,
pub fns: Vec<T>,
}
impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "VecPerParamSpace {{"));
for space in &ParamSpace::all() {
try!(write!(fmt, "{:?}: {:?}, ", *space, self.get_slice(*space)));
}
try!(write!(fmt, "}}"));
Ok(())
}
}
impl<T> VecPerParamSpace<T> {
fn limits(&self, space: ParamSpace) -> (uint, uint) {
match space {
TypeSpace => (0, self.type_limit),
SelfSpace => (self.type_limit, self.self_limit),
FnSpace => (self.self_limit, self.content.len()),
}
}
pub fn empty() -> VecPerParamSpace<T> {
VecPerParamSpace {
type_limit: 0,
self_limit: 0,
content: Vec::new()
}
}
pub fn params_from_type(types: Vec<T>) -> VecPerParamSpace<T> {
VecPerParamSpace::empty().with_vec(TypeSpace, types)
}
/// `t` is the type space.
/// `s` is the self space.
/// `a` is the assoc space.
/// `f` is the fn space.
pub fn new(t: Vec<T>, s: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
let type_limit = t.len();
let self_limit = type_limit + s.len();
let mut content = t;
content.extend(s.into_iter());
content.extend(f.into_iter());
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint)
-> VecPerParamSpace<T>
{
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
/// Appends `value` to the vector associated with `space`.
///
/// Unlike the `push` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn push(&mut self, space: ParamSpace, value: T) {
let (_, limit) = self.limits(space);
match space {
TypeSpace => { self.type_limit += 1; self.self_limit += 1; }
SelfSpace => { self.self_limit += 1; }
FnSpace => { }
}
self.content.insert(limit, value);
}
/// Appends `values` to the vector associated with `space`.
///
/// Unlike the `extend` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn extend<I:Iterator<Item=T>>(&mut self, space: ParamSpace, values: I) {
// This could be made more efficient, obviously.
for item in values {
self.push(space, item);
}
}
pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
let (start, limit) = self.limits(space);
if start == limit {
None
} else {
match space {
TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; }
SelfSpace => { self.self_limit -= 1; }
FnSpace => {}
}
if self.content.is_empty() {
None
} else {
Some(self.content.remove(limit - 1))
}
}
}
pub fn truncate(&mut self, space: ParamSpace, len: uint) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
while self.len(space) > len {
self.pop(space);
}
}
pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
self.truncate(space, 0);
for t in elems {
self.push(space, t);
}
}
pub fn get_self<'a>(&'a self) -> Option<&'a T> {
let v = self.get_slice(SelfSpace);
assert!(v.len() <= 1);
if v.len() == 0 { None } else { Some(&v[0]) }
}
pub fn len(&self, space: ParamSpace) -> uint {
self.get_slice(space).len()
}
pub fn is_empty_in(&self, space: ParamSpace) -> bool {
self.len(space) == 0
}
pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
let (start, limit) = self.limits(space);
&self.content[start.. limit]
}
pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
let (start, limit) = self.limits(space);
&mut self.content[start.. limit]
}
pub fn opt_get<'a>(&'a self,
space: ParamSpace,
index: uint)
-> Option<&'a T> {
let v = self.get_slice(space);
if index < v.len() { Some(&v[index]) } else { None }
}
pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
&self.get_slice(space)[index]
}
pub fn iter<'a>(&'a self) -> Iter<'a,T> {
self.content.iter()
}
pub fn into_iter(self) -> IntoIter<T> {
self.content.into_iter()
}
pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
EnumeratedItems::new(self)
}
pub fn as_slice(&self) -> &[T] {
&self.content
}
pub fn into_vec(self) -> Vec<T> {
self.content
}
pub fn all_vecs<P>(&self, mut pred: P) -> bool where
P: FnMut(&[T]) -> bool,
{
let spaces = [TypeSpace, SelfSpace, FnSpace];
spaces.iter().all(|&space| { pred(self.get_slice(space)) })
}
pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().all(pred)
}
pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().any(pred)
}
pub fn is_empty(&self) -> bool {
self.all_vecs(|v| v.is_empty())
}
pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
let result = self.iter().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
P: FnMut((ParamSpace, uint, &T)) -> U,
{
let result = self.iter_enumerated().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_move<U, F>(self, mut pred: F) -> VecPerParamSpace<U> where
F: FnMut(T) -> U,
{
let SeparateVecsPerParamSpace {
types: t,
selfs: s,
fns: f
} = self.split();
VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
s.into_iter().map(|p| pred(p)).collect(),
f.into_iter().map(|p| pred(p)).collect())
}
pub fn split(self) -> SeparateVecsPerParamSpace<T> {
let VecPerParamSpace { type_limit, self_limit, content } = self;
let mut content_iter = content.into_iter();
SeparateVecsPerParamSpace {
types: content_iter.by_ref().take(type_limit).collect(),
selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
fns: content_iter.collect()
}
}
pub fn with_vec(mut self, space: ParamSpace, vec: Vec<T>)
-> VecPerParamSpace<T>
{
assert!(self.is_empty_in(space));
self.replace(space, vec);
self
}
}
#[derive(Clone)]
pub struct EnumeratedItems<'a,T:'a> {
vec: &'a VecPerParamSpace<T>,
space_index: uint,
elem_index: uint
}
impl<'a,T> EnumeratedItems<'a,T> {
fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
result.adjust_space();
result
}
fn adjust_space(&mut self) {
let spaces = ParamSpace::all();
while
self.space_index < spaces.len() &&
self.elem_index >= self.vec.len(spaces[self.space_index])
{
self.space_index += 1;
self.elem_index = 0;
}
}
}
impl<'a,T> Iterator for EnumeratedItems<'a,T> {
type Item = (ParamSpace, uint, &'a T);
fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
let spaces = ParamSpace::all();
if self.space_index < spaces.len() {
let space = spaces[self.space_index];
let index = self.elem_index;
let item = self.vec.get(space, index);
self.elem_index += 1;
self.adjust_space();
Some((space, index, item))
} else {
None
}
}
}
impl<T> IntoIterator for VecPerParamSpace<T> {
type IntoIter = IntoIter<T>;
fn into_iter(self) -> IntoIter<T> {
self.into_vec().into_iter()
}
}
impl<'a,T> IntoIterator for &'a VecPerParamSpace<T> {
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.as_slice().into_iter()
}
}
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
// there is more information available (for better errors).
pub trait Subst<'tcx> : Sized {
fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self {
self.subst_spanned(tcx, substs, None)
}
fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> Self;
}
impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
fn subst_spanned(&self,
tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> T
{
let mut folder = SubstFolder { tcx: tcx,
substs: substs,
span: span,
root_ty: None,
ty_stack_depth: 0,
region_binders_passed: 0 };
(*self).fold_with(&mut folder)
}
}
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
struct SubstFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
substs: &'a Substs<'tcx>,
// The location for which the substitution is performed, if available.
span: Option<Span>,
// The root type that is being substituted, if available.
root_ty: Option<Ty<'tcx>>,
// Depth of type stack
ty_stack_depth: uint,
// Number of region binders we have passed through while doing the substitution
region_binders_passed: u32,
}
impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
fn enter_region_binder(&mut self) {
self.region_binders_passed += 1;
}
fn exit_region_binder(&mut self) {
self.region_binders_passed -= 1;
}
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
// Note: This routine only handles regions that are bound on
// type declarations and other outer declarations, not those
// bound in *fn types*. Region substitution of the bound
// regions that appear in a function signature is done using
// the specialized routine `ty::replace_late_regions()`.
match r {
ty::ReEarlyBound(_, space, i, region_name) => {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
match regions.opt_get(space, i as uint) {
Some(&r) => {
self.shift_
|
{
Substs::new(VecPerParamSpace::new(t, vec!(s), Vec::new()),
VecPerParamSpace::new(r, Vec::new(), Vec::new()))
}
|
identifier_body
|
|
subst.rs
|
pub fn trans_empty() -> Substs<'tcx> {
Substs {
types: VecPerParamSpace::empty(),
regions: ErasedRegions
}
}
pub fn is_noop(&self) -> bool {
let regions_is_noop = match self.regions {
ErasedRegions => false, // may be used to canonicalize
NonerasedRegions(ref regions) => regions.is_empty(),
};
regions_is_noop && self.types.is_empty()
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
*self.types.get(ty_param_def.space, ty_param_def.index as uint)
}
pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
match self.regions {
ErasedRegions =>
false,
NonerasedRegions(ref regions) =>
regions.iter().any(|r| r.escapes_depth(depth)),
}
}
}
pub fn self_ty(&self) -> Option<Ty<'tcx>> {
self.types.get_self().map(|&t| t)
}
pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
assert!(self.self_ty().is_none());
let mut s = (*self).clone();
s.types.push(SelfSpace, self_ty);
s
}
pub fn erase_regions(self) -> Substs<'tcx> {
let Substs { types, regions: _ } = self;
Substs { types: types, regions: ErasedRegions }
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref r) => r
}
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref mut r) => r
}
}
pub fn with_method(self,
m_types: Vec<Ty<'tcx>>,
m_regions: Vec<ty::Region>)
-> Substs<'tcx>
{
let Substs { types, regions } = self;
let types = types.with_vec(FnSpace, m_types);
let regions = regions.map(m_regions,
|r, m_regions| r.with_vec(FnSpace, m_regions));
Substs { types: types, regions: regions }
}
}
impl RegionSubsts {
fn map<A, F>(self, a: A, op: F) -> RegionSubsts where
F: FnOnce(VecPerParamSpace<ty::Region>, A) -> VecPerParamSpace<ty::Region>,
{
match self {
ErasedRegions => ErasedRegions,
NonerasedRegions(r) => NonerasedRegions(op(r, a))
}
}
pub fn is_erased(&self) -> bool {
match *self {
ErasedRegions => true,
NonerasedRegions(_) => false,
}
}
}
///////////////////////////////////////////////////////////////////////////
// ParamSpace
#[derive(PartialOrd, Ord, PartialEq, Eq, Copy,
Clone, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum ParamSpace {
TypeSpace, // Type parameters attached to a type definition, trait, or impl
SelfSpace, // Self parameter on a trait
FnSpace, // Type parameters attached to a method or fn
}
impl ParamSpace {
pub fn all() -> [ParamSpace; 3] {
[TypeSpace, SelfSpace, FnSpace]
}
pub fn to_uint(self) -> uint {
match self {
TypeSpace => 0,
SelfSpace => 1,
FnSpace => 2,
}
}
pub fn from_uint(u: uint) -> ParamSpace {
match u {
0 => TypeSpace,
1 => SelfSpace,
2 => FnSpace,
_ => panic!("Invalid ParamSpace: {}", u)
}
}
}
/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)]
pub struct VecPerParamSpace<T> {
// This was originally represented as a tuple with one Vec<T> for
// each variant of ParamSpace, and that remains the abstraction
// that it provides to its clients.
//
// Here is how the representation corresponds to the abstraction
// i.e. the "abstraction function" AF:
//
// AF(self) = (self.content[..self.type_limit],
// self.content[self.type_limit..self.self_limit],
// self.content[self.self_limit..])
type_limit: uint,
self_limit: uint,
content: Vec<T>,
}
/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
pub types: Vec<T>,
pub selfs: Vec<T>,
pub fns: Vec<T>,
}
impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "VecPerParamSpace {{"));
for space in &ParamSpace::all() {
try!(write!(fmt, "{:?}: {:?}, ", *space, self.get_slice(*space)));
}
try!(write!(fmt, "}}"));
Ok(())
}
}
impl<T> VecPerParamSpace<T> {
fn limits(&self, space: ParamSpace) -> (uint, uint) {
match space {
TypeSpace => (0, self.type_limit),
SelfSpace => (self.type_limit, self.self_limit),
FnSpace => (self.self_limit, self.content.len()),
}
}
pub fn empty() -> VecPerParamSpace<T> {
VecPerParamSpace {
type_limit: 0,
self_limit: 0,
content: Vec::new()
}
}
pub fn params_from_type(types: Vec<T>) -> VecPerParamSpace<T> {
VecPerParamSpace::empty().with_vec(TypeSpace, types)
}
/// `t` is the type space.
/// `s` is the self space.
/// `a` is the assoc space.
/// `f` is the fn space.
pub fn new(t: Vec<T>, s: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
let type_limit = t.len();
let self_limit = type_limit + s.len();
let mut content = t;
content.extend(s.into_iter());
content.extend(f.into_iter());
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint)
-> VecPerParamSpace<T>
{
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
/// Appends `value` to the vector associated with `space`.
///
/// Unlike the `push` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn push(&mut self, space: ParamSpace, value: T) {
let (_, limit) = self.limits(space);
match space {
TypeSpace => { self.type_limit += 1; self.self_limit += 1; }
SelfSpace => { self.self_limit += 1; }
FnSpace => { }
}
self.content.insert(limit, value);
}
/// Appends `values` to the vector associated with `space`.
///
/// Unlike the `extend` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn extend<I:Iterator<Item=T>>(&mut self, space: ParamSpace, values: I) {
// This could be made more efficient, obviously.
for item in values {
self.push(space, item);
}
}
pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
let (start, limit) = self.limits(space);
if start == limit {
None
} else {
match space {
TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; }
SelfSpace => { self.self_limit -= 1; }
FnSpace => {}
}
if self.content.is_empty() {
None
} else {
Some(self.content.remove(limit - 1))
}
}
}
pub fn truncate(&mut self, space: ParamSpace, len: uint) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
while self.len(space) > len {
self.pop(space);
}
}
pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
self.truncate(space, 0);
for t in elems {
self.push(space, t);
}
}
pub fn get_self<'a>(&'a self) -> Option<&'a T> {
let v = self.get_slice(SelfSpace);
assert!(v.len() <= 1);
if v.len() == 0 { None } else { Some(&v[0]) }
}
pub fn len(&self, space: ParamSpace) -> uint {
self.get_slice(space).len()
}
pub fn is_empty_in(&self, space: ParamSpace) -> bool {
self.len(space) == 0
}
pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
let (start, limit) = self.limits(space);
&self.content[start.. limit]
}
pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
let (start, limit) = self.limits(space);
&mut self.content[start.. limit]
}
pub fn opt_get<'a>(&'a self,
space: ParamSpace,
index: uint)
-> Option<&'a T> {
let v = self.get_slice(space);
if index < v.len() { Some(&v[index]) } else { None }
}
pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
&self.get_slice(space)[index]
}
pub fn iter<'a>(&'a self) -> Iter<'a,T> {
self.content.iter()
}
pub fn into_iter(self) -> IntoIter<T> {
self.content.into_iter()
}
pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
EnumeratedItems::new(self)
}
pub fn as_slice(&self) -> &[T] {
&self.content
}
pub fn into_vec(self) -> Vec<T> {
self.content
}
pub fn all_vecs<P>(&self, mut pred: P) -> bool where
P: FnMut(&[T]) -> bool,
{
let spaces = [TypeSpace, SelfSpace, FnSpace];
spaces.iter().all(|&space| { pred(self.get_slice(space)) })
}
pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().all(pred)
}
pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().any(pred)
}
pub fn is_empty(&self) -> bool {
self.all_vecs(|v| v.is_empty())
}
pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
let result = self.iter().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
P: FnMut((ParamSpace, uint, &T)) -> U,
{
let result = self.iter_enumerated().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_move<U, F>(self, mut pred: F) -> VecPerParamSpace<U> where
F: FnMut(T) -> U,
{
let SeparateVecsPerParamSpace {
types: t,
selfs: s,
fns: f
} = self.split();
VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
s.into_iter().map(|p| pred(p)).collect(),
f.into_iter().map(|p| pred(p)).collect())
}
pub fn split(self) -> SeparateVecsPerParamSpace<T> {
let VecPerParamSpace { type_limit, self_limit, content } = self;
let mut content_iter = content.into_iter();
SeparateVecsPerParamSpace {
types: content_iter.by_ref().take(type_limit).collect(),
selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
fns: content_iter.collect()
}
}
pub fn with_vec(mut self, space: ParamSpace, vec: Vec<T>)
-> VecPerParamSpace<T>
{
assert!(self.is_empty_in(space));
self.replace(space, vec);
self
}
}
#[derive(Clone)]
pub struct EnumeratedItems<'a,T:'a> {
vec: &'a VecPerParamSpace<T>,
space_index: uint,
elem_index: uint
}
impl<'a,T> EnumeratedItems<'a,T> {
fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
result.adjust_space();
result
}
fn adjust_space(&mut self) {
let spaces = ParamSpace::all();
while
self.space_index < spaces.len() &&
self.elem_index >= self.vec.len(spaces[self.space_index])
{
self.space_index += 1;
self.elem_index = 0;
}
}
}
impl<'a,T> Iterator for EnumeratedItems<'a,T> {
type Item = (ParamSpace, uint, &'a T);
fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
let spaces = ParamSpace::all();
if self.space_index < spaces.len() {
let space = spaces[self.space_index];
let index = self.elem_index;
let item = self.vec.get(space, index);
self.elem_index += 1;
self.adjust_space();
Some((space, index, item))
} else {
None
}
}
}
impl<T> IntoIterator for VecPerParamSpace<T> {
type IntoIter = IntoIter<T>;
fn into_iter(self) -> IntoIter<T> {
self.into_vec().into_iter()
}
}
impl<'a,T> IntoIterator for &'a VecPerParamSpace<T> {
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.as_slice().into_iter()
}
}
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
// there is more information available (for better errors).
pub trait Subst<'tcx> : Sized {
fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self {
self.subst_spanned(tcx, substs, None)
}
fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> Self;
}
impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
fn subst_spanned(&self,
tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> T
{
let mut folder = SubstFolder { tcx: tcx,
substs: substs,
span: span,
root_ty: None,
ty_stack_depth: 0,
region_binders_passed: 0 };
(*self).fold_with(&mut folder)
}
}
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
struct SubstFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
substs: &'a Substs<'tcx>,
// The location for which the substitution is performed, if available.
span: Option<Span>,
// The root type that is being substituted, if available.
root_ty: Option<Ty<'tcx>>,
// Depth of type stack
ty_stack_depth: uint,
// Number of region binders we have passed through while doing the substitution
region_binders_passed: u32,
}
impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
fn enter_region_binder(&mut self) {
self.region_binders_passed += 1;
}
fn exit_region_binder(&mut self) {
self.region_binders_passed -= 1;
}
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
// Note: This routine only handles regions that are bound on
// type declarations and other outer declarations, not those
// bound in *fn types*. Region substitution of the bound
// regions that appear in a function signature is done using
// the specialized routine `ty::replace_late_regions()`.
match r {
ty::ReEarlyBound(_, space, i, region_name) => {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
match regions.opt_get(space, i as uint) {
Some(&r) => {
self.shift_region_through_binders(r)
}
None => {
let span = self.span.unwrap_or(DUMMY_SP);
self.tcx().sess.span_bug(
span,
|
}
}
|
random_line_split
|
|
subst.rs
|
() -> Substs<'tcx> {
Substs {
types: VecPerParamSpace::empty(),
regions: ErasedRegions
}
}
pub fn is_noop(&self) -> bool {
let regions_is_noop = match self.regions {
ErasedRegions => false, // may be used to canonicalize
NonerasedRegions(ref regions) => regions.is_empty(),
};
regions_is_noop && self.types.is_empty()
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
*self.types.get(ty_param_def.space, ty_param_def.index as uint)
}
pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
match self.regions {
ErasedRegions =>
false,
NonerasedRegions(ref regions) =>
regions.iter().any(|r| r.escapes_depth(depth)),
}
}
}
pub fn self_ty(&self) -> Option<Ty<'tcx>> {
self.types.get_self().map(|&t| t)
}
pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
assert!(self.self_ty().is_none());
let mut s = (*self).clone();
s.types.push(SelfSpace, self_ty);
s
}
pub fn erase_regions(self) -> Substs<'tcx> {
let Substs { types, regions: _ } = self;
Substs { types: types, regions: ErasedRegions }
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref r) => r
}
}
/// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
/// to easily access the set of region substitutions.
pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
match self.regions {
ErasedRegions => panic!("Erased regions only expected in trans"),
NonerasedRegions(ref mut r) => r
}
}
pub fn with_method(self,
m_types: Vec<Ty<'tcx>>,
m_regions: Vec<ty::Region>)
-> Substs<'tcx>
{
let Substs { types, regions } = self;
let types = types.with_vec(FnSpace, m_types);
let regions = regions.map(m_regions,
|r, m_regions| r.with_vec(FnSpace, m_regions));
Substs { types: types, regions: regions }
}
}
impl RegionSubsts {
fn map<A, F>(self, a: A, op: F) -> RegionSubsts where
F: FnOnce(VecPerParamSpace<ty::Region>, A) -> VecPerParamSpace<ty::Region>,
{
match self {
ErasedRegions => ErasedRegions,
NonerasedRegions(r) => NonerasedRegions(op(r, a))
}
}
pub fn is_erased(&self) -> bool {
match *self {
ErasedRegions => true,
NonerasedRegions(_) => false,
}
}
}
///////////////////////////////////////////////////////////////////////////
// ParamSpace
#[derive(PartialOrd, Ord, PartialEq, Eq, Copy,
Clone, Hash, RustcEncodable, RustcDecodable, Debug)]
pub enum ParamSpace {
TypeSpace, // Type parameters attached to a type definition, trait, or impl
SelfSpace, // Self parameter on a trait
FnSpace, // Type parameters attached to a method or fn
}
impl ParamSpace {
pub fn all() -> [ParamSpace; 3] {
[TypeSpace, SelfSpace, FnSpace]
}
pub fn to_uint(self) -> uint {
match self {
TypeSpace => 0,
SelfSpace => 1,
FnSpace => 2,
}
}
pub fn from_uint(u: uint) -> ParamSpace {
match u {
0 => TypeSpace,
1 => SelfSpace,
2 => FnSpace,
_ => panic!("Invalid ParamSpace: {}", u)
}
}
}
/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)]
pub struct VecPerParamSpace<T> {
// This was originally represented as a tuple with one Vec<T> for
// each variant of ParamSpace, and that remains the abstraction
// that it provides to its clients.
//
// Here is how the representation corresponds to the abstraction
// i.e. the "abstraction function" AF:
//
// AF(self) = (self.content[..self.type_limit],
// self.content[self.type_limit..self.self_limit],
// self.content[self.self_limit..])
type_limit: uint,
self_limit: uint,
content: Vec<T>,
}
/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
pub types: Vec<T>,
pub selfs: Vec<T>,
pub fns: Vec<T>,
}
impl<T: fmt::Debug> fmt::Debug for VecPerParamSpace<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "VecPerParamSpace {{"));
for space in &ParamSpace::all() {
try!(write!(fmt, "{:?}: {:?}, ", *space, self.get_slice(*space)));
}
try!(write!(fmt, "}}"));
Ok(())
}
}
impl<T> VecPerParamSpace<T> {
fn limits(&self, space: ParamSpace) -> (uint, uint) {
match space {
TypeSpace => (0, self.type_limit),
SelfSpace => (self.type_limit, self.self_limit),
FnSpace => (self.self_limit, self.content.len()),
}
}
pub fn empty() -> VecPerParamSpace<T> {
VecPerParamSpace {
type_limit: 0,
self_limit: 0,
content: Vec::new()
}
}
pub fn params_from_type(types: Vec<T>) -> VecPerParamSpace<T> {
VecPerParamSpace::empty().with_vec(TypeSpace, types)
}
/// `t` is the type space.
/// `s` is the self space.
/// `a` is the assoc space.
/// `f` is the fn space.
pub fn new(t: Vec<T>, s: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
let type_limit = t.len();
let self_limit = type_limit + s.len();
let mut content = t;
content.extend(s.into_iter());
content.extend(f.into_iter());
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint)
-> VecPerParamSpace<T>
{
VecPerParamSpace {
type_limit: type_limit,
self_limit: self_limit,
content: content,
}
}
/// Appends `value` to the vector associated with `space`.
///
/// Unlike the `push` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn push(&mut self, space: ParamSpace, value: T) {
let (_, limit) = self.limits(space);
match space {
TypeSpace => { self.type_limit += 1; self.self_limit += 1; }
SelfSpace => { self.self_limit += 1; }
FnSpace => { }
}
self.content.insert(limit, value);
}
/// Appends `values` to the vector associated with `space`.
///
/// Unlike the `extend` method in `Vec`, this should not be assumed
/// to be a cheap operation (even when amortized over many calls).
pub fn extend<I:Iterator<Item=T>>(&mut self, space: ParamSpace, values: I) {
// This could be made more efficient, obviously.
for item in values {
self.push(space, item);
}
}
pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
let (start, limit) = self.limits(space);
if start == limit {
None
} else {
match space {
TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; }
SelfSpace => { self.self_limit -= 1; }
FnSpace => {}
}
if self.content.is_empty() {
None
} else {
Some(self.content.remove(limit - 1))
}
}
}
pub fn truncate(&mut self, space: ParamSpace, len: uint) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
while self.len(space) > len {
self.pop(space);
}
}
pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
self.truncate(space, 0);
for t in elems {
self.push(space, t);
}
}
pub fn get_self<'a>(&'a self) -> Option<&'a T> {
let v = self.get_slice(SelfSpace);
assert!(v.len() <= 1);
if v.len() == 0 { None } else { Some(&v[0]) }
}
pub fn len(&self, space: ParamSpace) -> uint {
self.get_slice(space).len()
}
pub fn is_empty_in(&self, space: ParamSpace) -> bool {
self.len(space) == 0
}
pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
let (start, limit) = self.limits(space);
&self.content[start.. limit]
}
pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
let (start, limit) = self.limits(space);
&mut self.content[start.. limit]
}
pub fn opt_get<'a>(&'a self,
space: ParamSpace,
index: uint)
-> Option<&'a T> {
let v = self.get_slice(space);
if index < v.len() { Some(&v[index]) } else { None }
}
pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
&self.get_slice(space)[index]
}
pub fn iter<'a>(&'a self) -> Iter<'a,T> {
self.content.iter()
}
pub fn into_iter(self) -> IntoIter<T> {
self.content.into_iter()
}
pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
EnumeratedItems::new(self)
}
pub fn as_slice(&self) -> &[T] {
&self.content
}
pub fn into_vec(self) -> Vec<T> {
self.content
}
pub fn all_vecs<P>(&self, mut pred: P) -> bool where
P: FnMut(&[T]) -> bool,
{
let spaces = [TypeSpace, SelfSpace, FnSpace];
spaces.iter().all(|&space| { pred(self.get_slice(space)) })
}
pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().all(pred)
}
pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
self.iter().any(pred)
}
pub fn is_empty(&self) -> bool {
self.all_vecs(|v| v.is_empty())
}
pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
let result = self.iter().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
P: FnMut((ParamSpace, uint, &T)) -> U,
{
let result = self.iter_enumerated().map(pred).collect();
VecPerParamSpace::new_internal(result,
self.type_limit,
self.self_limit)
}
pub fn map_move<U, F>(self, mut pred: F) -> VecPerParamSpace<U> where
F: FnMut(T) -> U,
{
let SeparateVecsPerParamSpace {
types: t,
selfs: s,
fns: f
} = self.split();
VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
s.into_iter().map(|p| pred(p)).collect(),
f.into_iter().map(|p| pred(p)).collect())
}
pub fn split(self) -> SeparateVecsPerParamSpace<T> {
let VecPerParamSpace { type_limit, self_limit, content } = self;
let mut content_iter = content.into_iter();
SeparateVecsPerParamSpace {
types: content_iter.by_ref().take(type_limit).collect(),
selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
fns: content_iter.collect()
}
}
pub fn with_vec(mut self, space: ParamSpace, vec: Vec<T>)
-> VecPerParamSpace<T>
{
assert!(self.is_empty_in(space));
self.replace(space, vec);
self
}
}
#[derive(Clone)]
pub struct EnumeratedItems<'a,T:'a> {
vec: &'a VecPerParamSpace<T>,
space_index: uint,
elem_index: uint
}
impl<'a,T> EnumeratedItems<'a,T> {
fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
result.adjust_space();
result
}
fn adjust_space(&mut self) {
let spaces = ParamSpace::all();
while
self.space_index < spaces.len() &&
self.elem_index >= self.vec.len(spaces[self.space_index])
{
self.space_index += 1;
self.elem_index = 0;
}
}
}
impl<'a,T> Iterator for EnumeratedItems<'a,T> {
type Item = (ParamSpace, uint, &'a T);
fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
let spaces = ParamSpace::all();
if self.space_index < spaces.len() {
let space = spaces[self.space_index];
let index = self.elem_index;
let item = self.vec.get(space, index);
self.elem_index += 1;
self.adjust_space();
Some((space, index, item))
} else {
None
}
}
}
impl<T> IntoIterator for VecPerParamSpace<T> {
type IntoIter = IntoIter<T>;
fn into_iter(self) -> IntoIter<T> {
self.into_vec().into_iter()
}
}
impl<'a,T> IntoIterator for &'a VecPerParamSpace<T> {
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.as_slice().into_iter()
}
}
///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
// there is more information available (for better errors).
pub trait Subst<'tcx> : Sized {
fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self {
self.subst_spanned(tcx, substs, None)
}
fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> Self;
}
impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
fn subst_spanned(&self,
tcx: &ty::ctxt<'tcx>,
substs: &Substs<'tcx>,
span: Option<Span>)
-> T
{
let mut folder = SubstFolder { tcx: tcx,
substs: substs,
span: span,
root_ty: None,
ty_stack_depth: 0,
region_binders_passed: 0 };
(*self).fold_with(&mut folder)
}
}
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
struct SubstFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
substs: &'a Substs<'tcx>,
// The location for which the substitution is performed, if available.
span: Option<Span>,
// The root type that is being substituted, if available.
root_ty: Option<Ty<'tcx>>,
// Depth of type stack
ty_stack_depth: uint,
// Number of region binders we have passed through while doing the substitution
region_binders_passed: u32,
}
impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx }
fn
|
(&mut self) {
self.region_binders_passed += 1;
}
fn exit_region_binder(&mut self) {
self.region_binders_passed -= 1;
}
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
// Note: This routine only handles regions that are bound on
// type declarations and other outer declarations, not those
// bound in *fn types*. Region substitution of the bound
// regions that appear in a function signature is done using
// the specialized routine `ty::replace_late_regions()`.
match r {
ty::ReEarlyBound(_, space, i, region_name) => {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
match regions.opt_get(space, i as uint) {
Some(&r) => {
self.shift_region_through_binders(r)
}
None => {
let span = self.span.unwrap_or(DUMMY_SP);
self.tcx().sess.span_bug(
span,
|
enter_region_binder
|
identifier_name
|
shutdown.rs
|
extern crate arg_parser;
extern crate extra;
extern crate syscall;
use std::env;
use std::io::{stderr, stdout, Error, Write};
use std::process::exit;
use arg_parser::ArgParser;
use extra::option::OptionalExt;
use syscall::flag::{SIGTERM, SIGKILL};
const MAN_PAGE: &'static str = /* @MANSTART{shutdown} */ r#"
NAME
shutdown - stop the system
SYNOPSIS
shutdown [ -h | --help ] [ -r | --reboot ]
DESCRIPTION
Attempt to shutdown the system using ACPI. Failure will be logged to the terminal
OPTIONS
-h
--help
display this help and exit
-r
--reboot
reboot instead of powering off
"#; /* @MANEND */
fn main() {
let stdout = stdout();
let mut stdout = stdout.lock();
let mut stderr = stderr();
let mut parser = ArgParser::new(1)
.add_flag(&["h", "help"])
.add_flag(&["r", "reboot"]);
parser.parse(env::args());
if parser.found("help") {
stdout.write(MAN_PAGE.as_bytes()).try(&mut stderr);
stdout.flush().try(&mut stderr);
exit(0);
}
if parser.found("reboot") {
syscall::kill(1, SIGTERM).map_err(|err| Error::from_raw_os_error(err.errno)).try(&mut stderr);
} else
|
}
|
{
syscall::kill(1, SIGKILL).map_err(|err| Error::from_raw_os_error(err.errno)).try(&mut stderr);
}
|
conditional_block
|