file_name   large_string   lengths 4 to 69
prefix      large_string   lengths 0 to 26.7k
suffix      large_string   lengths 0 to 24.8k
middle      large_string   lengths 0 to 2.12k
fim_type    large_string   4 classes
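The rows that follow are fill-in-the-middle (FIM) samples: each source file appears to have been cut into a prefix, a middle span, and a suffix, with fim_type recording how the cut was chosen (random_line_split, identifier_name, and identifier_body all occur below). As a reading aid, here is a minimal Rust sketch of how one such row could be reassembled into the original file text or laid out as a FIM training prompt; the FimRow struct, its field names, and the <fim_*> sentinel tokens are illustrative assumptions, not part of this dataset's tooling.

```rust
/// One row of the dataset as shown above (assumed field names, for illustration only).
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    // e.g. "random_line_split", "identifier_name", "identifier_body"
    fim_type: String,
}

impl FimRow {
    /// Reassemble the original file text: the middle slots back in
    /// between the prefix and the suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// Lay the row out in the common prefix-suffix-middle (PSM) prompt order.
    /// The sentinel token names are placeholders, not taken from this dataset.
    fn to_psm_prompt(&self) -> String {
        format!(
            "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
            self.prefix, self.suffix, self.middle
        )
    }
}

fn main() {
    // First row above (exp2f64.rs, random_line_split), with prefix and suffix
    // abbreviated here purely for illustration.
    let row = FimRow {
        file_name: "exp2f64.rs".to_string(),
        prefix: "#![feature(core, core_intrinsics, core_float)] extern crate core; ".to_string(),
        suffix: "// pub fn exp2f64(x: f64) -> f64;".to_string(),
        middle: "use core::num::Float; use core::f64; ".to_string(),
        fim_type: "random_line_split".to_string(),
    };
    println!("{} ({}):\n{}", row.file_name, row.fim_type, row.reconstruct());
    println!("{}", row.to_psm_prompt());
}
```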
exp2f64.rs
#![feature(core, core_intrinsics, core_float)] extern crate core; #[cfg(test)] mod tests { use core::intrinsics::exp2f64;
// pub fn exp2f64(x: f64) -> f64; #[test] fn expf64_test1() { let x: f64 = f64::nan(); let result: f64 = unsafe { exp2f64(x) }; assert_eq!(result.is_nan(), true); } #[test] fn expf64_test2() { let x: f64 = f64::infinity(); let result: f64 = unsafe { exp2f64(x) }; assert_eq!(result, f64::infinity()); } #[test] fn expf64_test3() { let x: f64 = f64::neg_infinity(); let result: f64 = unsafe { exp2f64(x) }; assert_eq!(result, 0.0); } #[test] fn expf64_test4() { let x: f64 = 10.0; let result: f64 = unsafe { exp2f64(x) }; assert_eq!(result, 1024.0); } }
use core::num::Float; use core::f64;
random_line_split
lib.rs
#![deny(unsafe_code)] extern crate app_units; extern crate atomic_refcell; #[macro_use] extern crate bitflags; extern crate canvas_traits; extern crate euclid; extern crate fnv; extern crate fxhash; extern crate gfx; extern crate gfx_traits; #[macro_use] extern crate html5ever; extern crate ipc_channel; extern crate libc; #[macro_use] extern crate log; extern crate malloc_size_of; extern crate msg; extern crate net_traits; extern crate ordered_float; extern crate parking_lot; extern crate profile_traits; #[macro_use] extern crate range; extern crate rayon; extern crate script_layout_interface; extern crate script_traits; #[macro_use] extern crate serde; extern crate serde_json; extern crate servo_arc; extern crate servo_atoms; extern crate servo_config; extern crate servo_geometry; extern crate servo_url; extern crate smallvec; extern crate style; extern crate style_traits; extern crate unicode_bidi; extern crate unicode_script; extern crate webrender_api; extern crate xi_unicode; #[macro_use] pub mod layout_debug; pub mod animation; mod block; pub mod construct; pub mod context; pub mod data; pub mod display_list; mod flex; mod floats; pub mod flow; mod flow_list; pub mod flow_ref; mod fragment; mod generated_content; pub mod incremental; mod inline; mod linked_list; mod list_item; mod model; mod multicol; pub mod opaque_node; pub mod parallel; mod persistent_list; pub mod query; pub mod sequential; mod table; mod table_caption; mod table_cell; mod table_colgroup; mod table_row; mod table_rowgroup; mod table_wrapper; mod text; pub mod traversal; pub mod wrapper; // For unit tests: pub use fragment::Fragment; pub use fragment::SpecificFragmentInfo; pub use self::data::LayoutData; // We can't use servo_arc for everything in layout, because the Flow stuff uses // weak references. use servo_arc::Arc as ServoArc;
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
random_line_split
mma8652fc.rs
use blue_pill::stm32f103xx::I2C1; use cast::u16; use cortex_m; use i2c; const I2C_ADDRESS: u8 = 0x1D; /// MMA8652FC Register Addresses #[allow(dead_code)] #[allow(non_camel_case_types)] #[derive(Clone, Copy)] pub enum Register { /// Status Register (R) STATUS = 0x00, /// [7:0] are 8 MSBs of the 14-bit X-axis sample (R) OUT_X_MSB = 0x01, /// [7:2] are 6 LSBs of the 14-bit X-axis sample (R) OUT_X_LSB = 0x02, /// [7:0] are 8 MSBs of the 14-bit Y-axis sample (R) OUT_Y_MSB = 0x03, /// [7:2] are 6 LSBs of the 14-bit Y-axis sample (R) OOT_Y_LSB = 0x04, /// [7:0] are 8 MSBs of the 14-bit Z-axis sample (R) OUT_Z_MSB = 0x05, /// [7:2] are 6 LSBs of the 14-bit Z-axis sample (R) OUT_Z_LSB = 0x06, /// FIFO Setup Register (R/W) F_SETUP = 0x09, /// Map of FIFO data capture events (R/W) TRIG_CFG = 0x0A, /// System Mode Register (R) SYSMOD = 0x0B, /// System Interrupt Status Register (R) INT_SOURCE = 0x0C, /// Device ID Register (R) WHO_AM_I = 0x0D, /// Sensor Data Configuration Register (R/W) XYZ_DATA_CFG = 0x0E, /// High Pass Filter Register (R/W) HP_FILTER_CUTOFF = 0x0F, /// Portait/tLandscape Status Register (R) PL_STATUS = 0x10, /// Portrait/Landscape Configuration Register (R/W) PL_CFG = 0x11, /// Portrait/Landscape Debounce Register (R/W) PL_COUNT = 0x12, /// Portrait/Landscape Back/Front and Z Compensation Register (R/W) PL_BF_ZCOMP = 0x13, /// Portrait/Landscape Threshold Register (R/W) P_L_THS_REG = 0x14, /// Freefall and Motion Configuration Register (R/W) FF_MT_CFG = 0x15, /// Freefall and Motion Source Register (R) FF_MT_SRC = 0x16, /// Freefall and Motion Threshold Register (R/W) FF_MT_THS = 0x17, /// Freefall Motion Count Register (R/W) FF_MT_COUNT = 0x18, /// Transient Configuration Register (R/W) TRANSIENT_CFG = 0x1D, /// Transient Source Register (R) TRANSIENT_SRC = 0x1E, /// Transient Threshold Register (R/W) TRANSIENT_THS = 0x1F, /// Transient Debounce Counter Register (R/W) TRANSIENT_COUNT = 0x20, /// Pulse Configuration Register (R/W) PULSE_CFG = 0x21, /// Pulse Source Register (R) PULSE_SRC = 0x22, /// Pulse X Threshold Register (R/W) PULSE_THS_X = 0x23, /// Pulse Y Threshold Register (R/W) PULSE_THS_Y = 0x24, /// Pulse Z Threshold Register (R/W) PULSE_THS_Z = 0x25, /// Pulse Time Window Register (R/W) PULSE_TLMT = 0x26, /// Pulse Latency Timer Register (R/W) PULSE_LTCY = 0x27, /// Second Pulse Time Window Register (R/W) PULSE_WIND = 0x28, /// Auto Sleep Inactivity Timer Register (R/W) ALSP_COUNT = 0x29, /// System Control 1 Register (R/W) CTRL_REG1 = 0x2A, /// System Control 2 Register (R/W) CTRL_REG2 = 0x2B, /// Interrupt Control Register (R/W) CTRL_REG3 = 0x2C, /// Interrupt Enable Register (R/W) CTRL_REG4 = 0x2D, /// Interrupt Configuration Register (R/W) CTRL_REG5 = 0x2E, /// X Offset Correction Register (R/W) OFF_X = 0x2F, /// Y Offset Correction Register (R/W) OFF_Y = 0x30, /// Z Offset Correction Register (R/W) OFF_Z = 0x31, } impl Register { pub fn addr(&self) -> u8 { *self as u8 } } #[derive(Clone, Copy)] pub struct Accel { /// X component pub x: i16, /// Y component pub y: i16, /// Z component pub z: i16, } pub struct MMA8652FC<'a>(pub &'a I2C1); impl<'a> MMA8652FC<'a> { pub fn init(&self) { self // Normal Mode .set_register(Register::CTRL_REG2, 0); // Reset all registers to POR values self.set_register(Register::CTRL_REG2, 0x40); for _ in 0..10_000 { cortex_m::asm::nop(); } // Enable motion detection for X, Y and Z axis, latch disabled self.set_register(Register::FF_MT_CFG, 0x78); // self.set_register(Register::FreefallMotionThr, 0x10); // 
self.set_register(Register::FreefallMotionCnt, 0x02); // Enable orientation detection self.set_register(Register::PL_CFG, 0x40); // set Debounce to 200 Counts self.set_register(Register::PL_COUNT, 200); // set Threshold to 42 degrees self.set_register(Register::PL_BF_ZCOMP, 0b01000111); // set threshold self.set_register(Register::P_L_THS_REG, 0b10011100); // enable data ready and orientation interrupt self.set_register(Register::CTRL_REG4, 0x01 | (1 << 4)); // route data ready interrupt to INT1 and orientation interrupt to INT2 self.set_register(Register::CTRL_REG5, 0x01); // set maximum resolution oversampling self.set_register(Register::CTRL_REG2, 0x12); // select high pass filtered data self.set_register(Register::XYZ_DATA_CFG, (1 << 4)); // select high pass filtered data self.set_register(Register::HP_FILTER_CUTOFF, 0x03); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x19); } #[allow(dead_code)] pub fn
(&self, threshold: u8, filter_time: u8) -> &Self { let sens = 9 * 2 + 17 - 2 * threshold; self // sleep mode .set_register(Register::CTRL_REG1, 0); // set accumulation threshold self.set_register(Register::FF_MT_THS, (sens & 0x7F)); // set debounce threshold self.set_register(Register::FF_MT_COUNT, filter_time); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x31) } pub fn accel(&self) -> Accel { let mut bytes = [0; 6]; i2c::read(&self.0, I2C_ADDRESS, Register::OUT_X_MSB.addr(), &mut bytes); Accel { x: ((u16(bytes[0]) << 8 ) + u16(bytes[1])) as i16, y: ((u16(bytes[2]) << 8 ) + u16(bytes[3])) as i16, z: ((u16(bytes[4]) << 8 ) + u16(bytes[5])) as i16, } } pub fn set_register(&self, reg: Register, value: u8) -> &Self { i2c::write(&self.0, I2C_ADDRESS, reg.addr(), value); self } }
set_sensitivity
identifier_name
mma8652fc.rs
use blue_pill::stm32f103xx::I2C1; use cast::u16; use cortex_m; use i2c; const I2C_ADDRESS: u8 = 0x1D; /// MMA8652FC Register Addresses #[allow(dead_code)] #[allow(non_camel_case_types)] #[derive(Clone, Copy)] pub enum Register { /// Status Register (R) STATUS = 0x00, /// [7:0] are 8 MSBs of the 14-bit X-axis sample (R) OUT_X_MSB = 0x01, /// [7:2] are 6 LSBs of the 14-bit X-axis sample (R) OUT_X_LSB = 0x02, /// [7:0] are 8 MSBs of the 14-bit Y-axis sample (R) OUT_Y_MSB = 0x03, /// [7:2] are 6 LSBs of the 14-bit Y-axis sample (R) OOT_Y_LSB = 0x04, /// [7:0] are 8 MSBs of the 14-bit Z-axis sample (R) OUT_Z_MSB = 0x05, /// [7:2] are 6 LSBs of the 14-bit Z-axis sample (R) OUT_Z_LSB = 0x06, /// FIFO Setup Register (R/W) F_SETUP = 0x09, /// Map of FIFO data capture events (R/W) TRIG_CFG = 0x0A, /// System Mode Register (R) SYSMOD = 0x0B, /// System Interrupt Status Register (R) INT_SOURCE = 0x0C, /// Device ID Register (R) WHO_AM_I = 0x0D, /// Sensor Data Configuration Register (R/W) XYZ_DATA_CFG = 0x0E, /// High Pass Filter Register (R/W) HP_FILTER_CUTOFF = 0x0F, /// Portait/tLandscape Status Register (R) PL_STATUS = 0x10, /// Portrait/Landscape Configuration Register (R/W) PL_CFG = 0x11, /// Portrait/Landscape Debounce Register (R/W) PL_COUNT = 0x12, /// Portrait/Landscape Back/Front and Z Compensation Register (R/W) PL_BF_ZCOMP = 0x13, /// Portrait/Landscape Threshold Register (R/W) P_L_THS_REG = 0x14, /// Freefall and Motion Configuration Register (R/W) FF_MT_CFG = 0x15, /// Freefall and Motion Source Register (R) FF_MT_SRC = 0x16, /// Freefall and Motion Threshold Register (R/W) FF_MT_THS = 0x17, /// Freefall Motion Count Register (R/W) FF_MT_COUNT = 0x18, /// Transient Configuration Register (R/W) TRANSIENT_CFG = 0x1D, /// Transient Source Register (R) TRANSIENT_SRC = 0x1E, /// Transient Threshold Register (R/W) TRANSIENT_THS = 0x1F, /// Transient Debounce Counter Register (R/W) TRANSIENT_COUNT = 0x20, /// Pulse Configuration Register (R/W) PULSE_CFG = 0x21, /// Pulse Source Register (R) PULSE_SRC = 0x22, /// Pulse X Threshold Register (R/W) PULSE_THS_X = 0x23, /// Pulse Y Threshold Register (R/W) PULSE_THS_Y = 0x24, /// Pulse Z Threshold Register (R/W) PULSE_THS_Z = 0x25, /// Pulse Time Window Register (R/W) PULSE_TLMT = 0x26, /// Pulse Latency Timer Register (R/W) PULSE_LTCY = 0x27, /// Second Pulse Time Window Register (R/W) PULSE_WIND = 0x28, /// Auto Sleep Inactivity Timer Register (R/W) ALSP_COUNT = 0x29, /// System Control 1 Register (R/W) CTRL_REG1 = 0x2A, /// System Control 2 Register (R/W) CTRL_REG2 = 0x2B, /// Interrupt Control Register (R/W) CTRL_REG3 = 0x2C, /// Interrupt Enable Register (R/W) CTRL_REG4 = 0x2D, /// Interrupt Configuration Register (R/W) CTRL_REG5 = 0x2E, /// X Offset Correction Register (R/W) OFF_X = 0x2F, /// Y Offset Correction Register (R/W) OFF_Y = 0x30, /// Z Offset Correction Register (R/W) OFF_Z = 0x31, } impl Register { pub fn addr(&self) -> u8
} #[derive(Clone, Copy)] pub struct Accel { /// X component pub x: i16, /// Y component pub y: i16, /// Z component pub z: i16, } pub struct MMA8652FC<'a>(pub &'a I2C1); impl<'a> MMA8652FC<'a> { pub fn init(&self) { self // Normal Mode .set_register(Register::CTRL_REG2, 0); // Reset all registers to POR values self.set_register(Register::CTRL_REG2, 0x40); for _ in 0..10_000 { cortex_m::asm::nop(); } // Enable motion detection for X, Y and Z axis, latch disabled self.set_register(Register::FF_MT_CFG, 0x78); // self.set_register(Register::FreefallMotionThr, 0x10); // self.set_register(Register::FreefallMotionCnt, 0x02); // Enable orientation detection self.set_register(Register::PL_CFG, 0x40); // set Debounce to 200 Counts self.set_register(Register::PL_COUNT, 200); // set Threshold to 42 degrees self.set_register(Register::PL_BF_ZCOMP, 0b01000111); // set threshold self.set_register(Register::P_L_THS_REG, 0b10011100); // enable data ready and orientation interrupt self.set_register(Register::CTRL_REG4, 0x01 | (1 << 4)); // route data ready interrupt to INT1 and orientation interrupt to INT2 self.set_register(Register::CTRL_REG5, 0x01); // set maximum resolution oversampling self.set_register(Register::CTRL_REG2, 0x12); // select high pass filtered data self.set_register(Register::XYZ_DATA_CFG, (1 << 4)); // select high pass filtered data self.set_register(Register::HP_FILTER_CUTOFF, 0x03); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x19); } #[allow(dead_code)] pub fn set_sensitivity(&self, threshold: u8, filter_time: u8) -> &Self { let sens = 9 * 2 + 17 - 2 * threshold; self // sleep mode .set_register(Register::CTRL_REG1, 0); // set accumulation threshold self.set_register(Register::FF_MT_THS, (sens & 0x7F)); // set debounce threshold self.set_register(Register::FF_MT_COUNT, filter_time); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x31) } pub fn accel(&self) -> Accel { let mut bytes = [0; 6]; i2c::read(&self.0, I2C_ADDRESS, Register::OUT_X_MSB.addr(), &mut bytes); Accel { x: ((u16(bytes[0]) << 8 ) + u16(bytes[1])) as i16, y: ((u16(bytes[2]) << 8 ) + u16(bytes[3])) as i16, z: ((u16(bytes[4]) << 8 ) + u16(bytes[5])) as i16, } } pub fn set_register(&self, reg: Register, value: u8) -> &Self { i2c::write(&self.0, I2C_ADDRESS, reg.addr(), value); self } }
{ *self as u8 }
identifier_body
mma8652fc.rs
use blue_pill::stm32f103xx::I2C1; use cast::u16; use cortex_m; use i2c;
#[derive(Clone, Copy)] pub enum Register { /// Status Register (R) STATUS = 0x00, /// [7:0] are 8 MSBs of the 14-bit X-axis sample (R) OUT_X_MSB = 0x01, /// [7:2] are 6 LSBs of the 14-bit X-axis sample (R) OUT_X_LSB = 0x02, /// [7:0] are 8 MSBs of the 14-bit Y-axis sample (R) OUT_Y_MSB = 0x03, /// [7:2] are 6 LSBs of the 14-bit Y-axis sample (R) OOT_Y_LSB = 0x04, /// [7:0] are 8 MSBs of the 14-bit Z-axis sample (R) OUT_Z_MSB = 0x05, /// [7:2] are 6 LSBs of the 14-bit Z-axis sample (R) OUT_Z_LSB = 0x06, /// FIFO Setup Register (R/W) F_SETUP = 0x09, /// Map of FIFO data capture events (R/W) TRIG_CFG = 0x0A, /// System Mode Register (R) SYSMOD = 0x0B, /// System Interrupt Status Register (R) INT_SOURCE = 0x0C, /// Device ID Register (R) WHO_AM_I = 0x0D, /// Sensor Data Configuration Register (R/W) XYZ_DATA_CFG = 0x0E, /// High Pass Filter Register (R/W) HP_FILTER_CUTOFF = 0x0F, /// Portait/tLandscape Status Register (R) PL_STATUS = 0x10, /// Portrait/Landscape Configuration Register (R/W) PL_CFG = 0x11, /// Portrait/Landscape Debounce Register (R/W) PL_COUNT = 0x12, /// Portrait/Landscape Back/Front and Z Compensation Register (R/W) PL_BF_ZCOMP = 0x13, /// Portrait/Landscape Threshold Register (R/W) P_L_THS_REG = 0x14, /// Freefall and Motion Configuration Register (R/W) FF_MT_CFG = 0x15, /// Freefall and Motion Source Register (R) FF_MT_SRC = 0x16, /// Freefall and Motion Threshold Register (R/W) FF_MT_THS = 0x17, /// Freefall Motion Count Register (R/W) FF_MT_COUNT = 0x18, /// Transient Configuration Register (R/W) TRANSIENT_CFG = 0x1D, /// Transient Source Register (R) TRANSIENT_SRC = 0x1E, /// Transient Threshold Register (R/W) TRANSIENT_THS = 0x1F, /// Transient Debounce Counter Register (R/W) TRANSIENT_COUNT = 0x20, /// Pulse Configuration Register (R/W) PULSE_CFG = 0x21, /// Pulse Source Register (R) PULSE_SRC = 0x22, /// Pulse X Threshold Register (R/W) PULSE_THS_X = 0x23, /// Pulse Y Threshold Register (R/W) PULSE_THS_Y = 0x24, /// Pulse Z Threshold Register (R/W) PULSE_THS_Z = 0x25, /// Pulse Time Window Register (R/W) PULSE_TLMT = 0x26, /// Pulse Latency Timer Register (R/W) PULSE_LTCY = 0x27, /// Second Pulse Time Window Register (R/W) PULSE_WIND = 0x28, /// Auto Sleep Inactivity Timer Register (R/W) ALSP_COUNT = 0x29, /// System Control 1 Register (R/W) CTRL_REG1 = 0x2A, /// System Control 2 Register (R/W) CTRL_REG2 = 0x2B, /// Interrupt Control Register (R/W) CTRL_REG3 = 0x2C, /// Interrupt Enable Register (R/W) CTRL_REG4 = 0x2D, /// Interrupt Configuration Register (R/W) CTRL_REG5 = 0x2E, /// X Offset Correction Register (R/W) OFF_X = 0x2F, /// Y Offset Correction Register (R/W) OFF_Y = 0x30, /// Z Offset Correction Register (R/W) OFF_Z = 0x31, } impl Register { pub fn addr(&self) -> u8 { *self as u8 } } #[derive(Clone, Copy)] pub struct Accel { /// X component pub x: i16, /// Y component pub y: i16, /// Z component pub z: i16, } pub struct MMA8652FC<'a>(pub &'a I2C1); impl<'a> MMA8652FC<'a> { pub fn init(&self) { self // Normal Mode .set_register(Register::CTRL_REG2, 0); // Reset all registers to POR values self.set_register(Register::CTRL_REG2, 0x40); for _ in 0..10_000 { cortex_m::asm::nop(); } // Enable motion detection for X, Y and Z axis, latch disabled self.set_register(Register::FF_MT_CFG, 0x78); // self.set_register(Register::FreefallMotionThr, 0x10); // self.set_register(Register::FreefallMotionCnt, 0x02); // Enable orientation detection self.set_register(Register::PL_CFG, 0x40); // set Debounce to 200 Counts self.set_register(Register::PL_COUNT, 200); // set 
Threshold to 42 degrees self.set_register(Register::PL_BF_ZCOMP, 0b01000111); // set threshold self.set_register(Register::P_L_THS_REG, 0b10011100); // enable data ready and orientation interrupt self.set_register(Register::CTRL_REG4, 0x01 | (1 << 4)); // route data ready interrupt to INT1 and orientation interrupt to INT2 self.set_register(Register::CTRL_REG5, 0x01); // set maximum resolution oversampling self.set_register(Register::CTRL_REG2, 0x12); // select high pass filtered data self.set_register(Register::XYZ_DATA_CFG, (1 << 4)); // select high pass filtered data self.set_register(Register::HP_FILTER_CUTOFF, 0x03); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x19); } #[allow(dead_code)] pub fn set_sensitivity(&self, threshold: u8, filter_time: u8) -> &Self { let sens = 9 * 2 + 17 - 2 * threshold; self // sleep mode .set_register(Register::CTRL_REG1, 0); // set accumulation threshold self.set_register(Register::FF_MT_THS, (sens & 0x7F)); // set debounce threshold self.set_register(Register::FF_MT_COUNT, filter_time); // 12 Hz, active mode self.set_register(Register::CTRL_REG1, 0x31) } pub fn accel(&self) -> Accel { let mut bytes = [0; 6]; i2c::read(&self.0, I2C_ADDRESS, Register::OUT_X_MSB.addr(), &mut bytes); Accel { x: ((u16(bytes[0]) << 8 ) + u16(bytes[1])) as i16, y: ((u16(bytes[2]) << 8 ) + u16(bytes[3])) as i16, z: ((u16(bytes[4]) << 8 ) + u16(bytes[5])) as i16, } } pub fn set_register(&self, reg: Register, value: u8) -> &Self { i2c::write(&self.0, I2C_ADDRESS, reg.addr(), value); self } }
const I2C_ADDRESS: u8 = 0x1D; /// MMA8652FC Register Addresses #[allow(dead_code)] #[allow(non_camel_case_types)]
random_line_split
basic_shape.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! CSS handling for the [`basic-shape`](https://drafts.csswg.org/css-shapes/#typedef-basic-shape) //! types that are generic over their `ToCss` implementations. use euclid::size::Size2D; use std::fmt; use style_traits::{HasViewportPercentage, ToCss}; use values::computed::ComputedValueAsSpecified; use values::generics::BorderRadiusSize; use values::generics::position::Position; use values::generics::rect::Rect; use values::specified::url::SpecifiedUrl; /// A clipping shape, for `clip-path`. pub type ClippingShape<BasicShape> = ShapeSource<BasicShape, GeometryBox>; /// https://drafts.fxtf.org/css-masking-1/#typedef-geometry-box #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq)] pub enum GeometryBox { FillBox, StrokeBox, ViewBox, ShapeBox(ShapeBox), } impl ComputedValueAsSpecified for GeometryBox {} /// A float area shape, for `shape-outside`. pub type FloatAreaShape<BasicShape> = ShapeSource<BasicShape, ShapeBox>; // https://drafts.csswg.org/css-shapes-1/#typedef-shape-box define_css_keyword_enum!(ShapeBox: "margin-box" => MarginBox, "border-box" => BorderBox, "padding-box" => PaddingBox, "content-box" => ContentBox ); add_impls_for_keyword_enum!(ShapeBox); /// A shape source, for some reference box. #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub enum ShapeSource<BasicShape, ReferenceBox> { Url(SpecifiedUrl), Shape(BasicShape, Option<ReferenceBox>), Box(ReferenceBox), None, } #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub enum BasicShape<H, V, LengthOrPercentage> { Inset(InsetRect<LengthOrPercentage>), Circle(Circle<H, V, LengthOrPercentage>), Ellipse(Ellipse<H, V, LengthOrPercentage>), Polygon(Polygon<LengthOrPercentage>), } /// https://drafts.csswg.org/css-shapes/#funcdef-inset #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub struct InsetRect<LengthOrPercentage> { pub rect: Rect<LengthOrPercentage>, pub round: Option<BorderRadius<LengthOrPercentage>>, } /// A generic type used for `border-radius`, `outline-radius` and `inset()` values. /// /// https://drafts.csswg.org/css-backgrounds-3/#border-radius #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub struct BorderRadius<LengthOrPercentage> { /// The top left radius. pub top_left: BorderRadiusSize<LengthOrPercentage>, /// The top right radius. pub top_right: BorderRadiusSize<LengthOrPercentage>, /// The bottom right radius. pub bottom_right: BorderRadiusSize<LengthOrPercentage>, /// The bottom left radius. 
pub bottom_left: BorderRadiusSize<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#funcdef-circle #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub struct Circle<H, V, LengthOrPercentage> { pub position: Position<H, V>, pub radius: ShapeRadius<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#funcdef-ellipse #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub struct Ellipse<H, V, LengthOrPercentage> { pub position: Position<H, V>, pub semiaxis_x: ShapeRadius<LengthOrPercentage>, pub semiaxis_y: ShapeRadius<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#typedef-shape-radius #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub enum ShapeRadius<LengthOrPercentage> { Length(LengthOrPercentage), ClosestSide, FarthestSide, } #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] /// A generic type for representing the `polygon()` function /// /// https://drafts.csswg.org/css-shapes/#funcdef-polygon pub struct Polygon<LengthOrPercentage> { /// The filling rule for a polygon. pub fill: FillRule, /// A collection of (x, y) coordinates to draw the polygon. pub coordinates: Vec<(LengthOrPercentage, LengthOrPercentage)>, } // https://drafts.csswg.org/css-shapes/#typedef-fill-rule // NOTE: Basic shapes spec says that these are the only two values, however // https://www.w3.org/TR/SVG/painting.html#FillRuleProperty // says that it can also be `inherit` define_css_keyword_enum!(FillRule: "nonzero" => NonZero, "evenodd" => EvenOdd ); add_impls_for_keyword_enum!(FillRule); impl<B, T> HasViewportPercentage for ShapeSource<B, T> { #[inline] fn has_viewport_percentage(&self) -> bool { false } } impl<B: ToCss, T: ToCss> ToCss for ShapeSource<B, T> { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { ShapeSource::Url(ref url) => url.to_css(dest), ShapeSource::Shape(ref shape, Some(ref ref_box)) => { shape.to_css(dest)?; dest.write_str(" ")?; ref_box.to_css(dest) }, ShapeSource::Shape(ref shape, None) => shape.to_css(dest), ShapeSource::Box(ref val) => val.to_css(dest), ShapeSource::None => dest.write_str("none"), } } } impl ToCss for GeometryBox { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { GeometryBox::FillBox => dest.write_str("fill-box"), GeometryBox::StrokeBox => dest.write_str("stroke-box"), GeometryBox::ViewBox => dest.write_str("view-box"), GeometryBox::ShapeBox(s) => s.to_css(dest), } } } impl<H, V, L> ToCss for BasicShape<H, V, L> where H: ToCss, V: ToCss, L: PartialEq + ToCss, Circle<H, V, L>: ToCss, Ellipse<H, V, L>: ToCss, { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { BasicShape::Inset(ref rect) => rect.to_css(dest), BasicShape::Circle(ref circle) => circle.to_css(dest), BasicShape::Ellipse(ref ellipse) => ellipse.to_css(dest), BasicShape::Polygon(ref polygon) => polygon.to_css(dest), } } } impl<L> ToCss for InsetRect<L> where L: ToCss + PartialEq { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { dest.write_str("inset(")?; self.rect.to_css(dest)?; if let Some(ref radius) = self.round { dest.write_str(" round ")?; radius.to_css(dest)?; } dest.write_str(")") } } impl<L: ToCss + PartialEq> ToCss for BorderRadius<L> { 
#[inline] fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { serialize_radius_values(dest, &self.top_left.0, &self.top_right.0, &self.bottom_right.0, &self.bottom_left.0) } } /// Serialization helper for types of longhands like `border-radius` and `outline-radius` pub fn serialize_radius_values<L, W>(dest: &mut W, top_left: &Size2D<L>, top_right: &Size2D<L>, bottom_right: &Size2D<L>, bottom_left: &Size2D<L>) -> fmt::Result where L: ToCss + PartialEq, W: fmt::Write { Rect::new(&top_left.width, &top_right.width, &bottom_right.width, &bottom_left.width).to_css(dest)?; if top_left.width!= top_left.height || top_right.width!= top_right.height || bottom_right.width!= bottom_right.height || bottom_left.width!= bottom_left.height { dest.write_str(" / ")?; Rect::new(&top_left.height, &top_right.height, &bottom_right.height, &bottom_left.height).to_css(dest)?; } Ok(()) } impl<L> Default for ShapeRadius<L> { #[inline] fn default() -> Self { ShapeRadius::ClosestSide } } impl<L: ToCss> ToCss for ShapeRadius<L> { #[inline] fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { ShapeRadius::Length(ref lop) => lop.to_css(dest), ShapeRadius::ClosestSide => dest.write_str("closest-side"), ShapeRadius::FarthestSide => dest.write_str("farthest-side"), } } } impl<L: ToCss> ToCss for Polygon<L> { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { dest.write_str("polygon(")?; if self.fill!= FillRule::default() { self.fill.to_css(dest)?; dest.write_str(", ")?; } for (i, coord) in self.coordinates.iter().enumerate() { if i > 0 { dest.write_str(", ")?; } coord.0.to_css(dest)?; dest.write_str(" ")?; coord.1.to_css(dest)?; } dest.write_str(")") } } impl Default for FillRule {
fn default() -> Self { FillRule::NonZero } }
#[inline]
random_line_split
basic_shape.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! CSS handling for the [`basic-shape`](https://drafts.csswg.org/css-shapes/#typedef-basic-shape) //! types that are generic over their `ToCss` implementations. use euclid::size::Size2D; use std::fmt; use style_traits::{HasViewportPercentage, ToCss}; use values::computed::ComputedValueAsSpecified; use values::generics::BorderRadiusSize; use values::generics::position::Position; use values::generics::rect::Rect; use values::specified::url::SpecifiedUrl; /// A clipping shape, for `clip-path`. pub type ClippingShape<BasicShape> = ShapeSource<BasicShape, GeometryBox>; /// https://drafts.fxtf.org/css-masking-1/#typedef-geometry-box #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq)] pub enum GeometryBox { FillBox, StrokeBox, ViewBox, ShapeBox(ShapeBox), } impl ComputedValueAsSpecified for GeometryBox {} /// A float area shape, for `shape-outside`. pub type FloatAreaShape<BasicShape> = ShapeSource<BasicShape, ShapeBox>; // https://drafts.csswg.org/css-shapes-1/#typedef-shape-box define_css_keyword_enum!(ShapeBox: "margin-box" => MarginBox, "border-box" => BorderBox, "padding-box" => PaddingBox, "content-box" => ContentBox ); add_impls_for_keyword_enum!(ShapeBox); /// A shape source, for some reference box. #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub enum ShapeSource<BasicShape, ReferenceBox> { Url(SpecifiedUrl), Shape(BasicShape, Option<ReferenceBox>), Box(ReferenceBox), None, } #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub enum BasicShape<H, V, LengthOrPercentage> { Inset(InsetRect<LengthOrPercentage>), Circle(Circle<H, V, LengthOrPercentage>), Ellipse(Ellipse<H, V, LengthOrPercentage>), Polygon(Polygon<LengthOrPercentage>), } /// https://drafts.csswg.org/css-shapes/#funcdef-inset #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub struct InsetRect<LengthOrPercentage> { pub rect: Rect<LengthOrPercentage>, pub round: Option<BorderRadius<LengthOrPercentage>>, } /// A generic type used for `border-radius`, `outline-radius` and `inset()` values. /// /// https://drafts.csswg.org/css-backgrounds-3/#border-radius #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] pub struct BorderRadius<LengthOrPercentage> { /// The top left radius. pub top_left: BorderRadiusSize<LengthOrPercentage>, /// The top right radius. pub top_right: BorderRadiusSize<LengthOrPercentage>, /// The bottom right radius. pub bottom_right: BorderRadiusSize<LengthOrPercentage>, /// The bottom left radius. 
pub bottom_left: BorderRadiusSize<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#funcdef-circle #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub struct Circle<H, V, LengthOrPercentage> { pub position: Position<H, V>, pub radius: ShapeRadius<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#funcdef-ellipse #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub struct Ellipse<H, V, LengthOrPercentage> { pub position: Position<H, V>, pub semiaxis_x: ShapeRadius<LengthOrPercentage>, pub semiaxis_y: ShapeRadius<LengthOrPercentage>, } /// https://drafts.csswg.org/css-shapes/#typedef-shape-radius #[allow(missing_docs)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Copy, Debug, PartialEq, ToComputedValue)] pub enum ShapeRadius<LengthOrPercentage> { Length(LengthOrPercentage), ClosestSide, FarthestSide, } #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, PartialEq, ToComputedValue)] /// A generic type for representing the `polygon()` function /// /// https://drafts.csswg.org/css-shapes/#funcdef-polygon pub struct Polygon<LengthOrPercentage> { /// The filling rule for a polygon. pub fill: FillRule, /// A collection of (x, y) coordinates to draw the polygon. pub coordinates: Vec<(LengthOrPercentage, LengthOrPercentage)>, } // https://drafts.csswg.org/css-shapes/#typedef-fill-rule // NOTE: Basic shapes spec says that these are the only two values, however // https://www.w3.org/TR/SVG/painting.html#FillRuleProperty // says that it can also be `inherit` define_css_keyword_enum!(FillRule: "nonzero" => NonZero, "evenodd" => EvenOdd ); add_impls_for_keyword_enum!(FillRule); impl<B, T> HasViewportPercentage for ShapeSource<B, T> { #[inline] fn has_viewport_percentage(&self) -> bool { false } } impl<B: ToCss, T: ToCss> ToCss for ShapeSource<B, T> { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { ShapeSource::Url(ref url) => url.to_css(dest), ShapeSource::Shape(ref shape, Some(ref ref_box)) => { shape.to_css(dest)?; dest.write_str(" ")?; ref_box.to_css(dest) }, ShapeSource::Shape(ref shape, None) => shape.to_css(dest), ShapeSource::Box(ref val) => val.to_css(dest), ShapeSource::None => dest.write_str("none"), } } } impl ToCss for GeometryBox { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { GeometryBox::FillBox => dest.write_str("fill-box"), GeometryBox::StrokeBox => dest.write_str("stroke-box"), GeometryBox::ViewBox => dest.write_str("view-box"), GeometryBox::ShapeBox(s) => s.to_css(dest), } } } impl<H, V, L> ToCss for BasicShape<H, V, L> where H: ToCss, V: ToCss, L: PartialEq + ToCss, Circle<H, V, L>: ToCss, Ellipse<H, V, L>: ToCss, { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { BasicShape::Inset(ref rect) => rect.to_css(dest), BasicShape::Circle(ref circle) => circle.to_css(dest), BasicShape::Ellipse(ref ellipse) => ellipse.to_css(dest), BasicShape::Polygon(ref polygon) => polygon.to_css(dest), } } } impl<L> ToCss for InsetRect<L> where L: ToCss + PartialEq { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { dest.write_str("inset(")?; self.rect.to_css(dest)?; if let Some(ref radius) = self.round { dest.write_str(" round ")?; radius.to_css(dest)?; } dest.write_str(")") } } impl<L: ToCss + PartialEq> ToCss for BorderRadius<L> { 
#[inline] fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { serialize_radius_values(dest, &self.top_left.0, &self.top_right.0, &self.bottom_right.0, &self.bottom_left.0) } } /// Serialization helper for types of longhands like `border-radius` and `outline-radius` pub fn serialize_radius_values<L, W>(dest: &mut W, top_left: &Size2D<L>, top_right: &Size2D<L>, bottom_right: &Size2D<L>, bottom_left: &Size2D<L>) -> fmt::Result where L: ToCss + PartialEq, W: fmt::Write { Rect::new(&top_left.width, &top_right.width, &bottom_right.width, &bottom_left.width).to_css(dest)?; if top_left.width!= top_left.height || top_right.width!= top_right.height || bottom_right.width!= bottom_right.height || bottom_left.width!= bottom_left.height { dest.write_str(" / ")?; Rect::new(&top_left.height, &top_right.height, &bottom_right.height, &bottom_left.height).to_css(dest)?; } Ok(()) } impl<L> Default for ShapeRadius<L> { #[inline] fn
() -> Self { ShapeRadius::ClosestSide } } impl<L: ToCss> ToCss for ShapeRadius<L> { #[inline] fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { ShapeRadius::Length(ref lop) => lop.to_css(dest), ShapeRadius::ClosestSide => dest.write_str("closest-side"), ShapeRadius::FarthestSide => dest.write_str("farthest-side"), } } } impl<L: ToCss> ToCss for Polygon<L> { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { dest.write_str("polygon(")?; if self.fill!= FillRule::default() { self.fill.to_css(dest)?; dest.write_str(", ")?; } for (i, coord) in self.coordinates.iter().enumerate() { if i > 0 { dest.write_str(", ")?; } coord.0.to_css(dest)?; dest.write_str(" ")?; coord.1.to_css(dest)?; } dest.write_str(")") } } impl Default for FillRule { #[inline] fn default() -> Self { FillRule::NonZero } }
default
identifier_name
access.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// An exclusive access primitive /// /// This primitive is used to gain exclusive access to read() and write() in uv. /// It is assumed that all invocations of this struct happen on the same thread /// (the uv event loop). use alloc::arc::Arc; use std::mem; use std::rt::local::Local; use std::rt::task::{BlockedTask, Task}; use std::cell::UnsafeCell; use homing::HomingMissile; pub struct Access<T> { inner: Arc<UnsafeCell<Inner<T>>>, } pub struct Guard<'a, T:'static> { access: &'a mut Access<T>, missile: Option<HomingMissile>, } struct Inner<T> { queue: Vec<(BlockedTask, uint)>, held: bool, closed: bool, data: T, } impl<T: Send> Access<T> { pub fn new(data: T) -> Access<T> { Access { inner: Arc::new(UnsafeCell::new(Inner { queue: vec![], held: false, closed: false, data: data, })) } } pub fn grant<'a>(&'a mut self, token: uint, missile: HomingMissile) -> Guard<'a, T> { // This unsafety is actually OK because the homing missile argument // guarantees that we're on the same event loop as all the other objects // attempting to get access granted. let inner = unsafe { &mut *self.inner.get() }; if inner.held { let t: Box<Task> = Local::take(); t.deschedule(1, |task| { inner.queue.push((task, token)); Ok(()) }); assert!(inner.held); } else { inner.held = true; } Guard { access: self, missile: Some(missile) } } pub fn unsafe_get(&self) -> *mut T { unsafe { &mut (*self.inner.get()).data as *mut _ } } // Safe version which requires proof that you are on the home scheduler. pub fn
<'a>(&'a mut self, _missile: &HomingMissile) -> &'a mut T { unsafe { &mut *self.unsafe_get() } } pub fn close(&self, _missile: &HomingMissile) { // This unsafety is OK because with a homing missile we're guaranteed to // be the only task looking at the `closed` flag (and are therefore // allowed to modify it). Additionally, no atomics are necessary because // everyone's running on the same thread and has already done the // necessary synchronization to be running on this thread. unsafe { (*self.inner.get()).closed = true; } } // Dequeue a blocked task with a specified token. This is unsafe because it // is only safe to invoke while on the home event loop, and there is no // guarantee that this i being invoked on the home event loop. pub unsafe fn dequeue(&mut self, token: uint) -> Option<BlockedTask> { let inner = &mut *self.inner.get(); match inner.queue.iter().position(|&(_, t)| t == token) { Some(i) => Some(inner.queue.remove(i).unwrap().val0()), None => None, } } /// Test whether this access is closed, using a homing missile to prove /// that it's safe pub fn is_closed(&self, _missile: &HomingMissile) -> bool { unsafe { (*self.inner.get()).closed } } } impl<T: Send> Clone for Access<T> { fn clone(&self) -> Access<T> { Access { inner: self.inner.clone() } } } impl<'a, T: Send> Guard<'a, T> { pub fn is_closed(&self) -> bool { // See above for why this unsafety is ok, it just applies to the read // instead of the write. unsafe { (*self.access.inner.get()).closed } } } impl<'a, T: Send> Deref<T> for Guard<'a, T> { fn deref<'a>(&'a self) -> &'a T { // A guard represents exclusive access to a piece of data, so it's safe // to hand out shared and mutable references unsafe { &(*self.access.inner.get()).data } } } impl<'a, T: Send> DerefMut<T> for Guard<'a, T> { fn deref_mut<'a>(&'a mut self) -> &'a mut T { unsafe { &mut (*self.access.inner.get()).data } } } #[unsafe_destructor] impl<'a, T> Drop for Guard<'a, T> { fn drop(&mut self) { // This guard's homing missile is still armed, so we're guaranteed to be // on the same I/O event loop, so this unsafety should be ok. assert!(self.missile.is_some()); let inner: &mut Inner<T> = unsafe { mem::transmute(self.access.inner.get()) }; match inner.queue.remove(0) { // Here we have found a task that was waiting for access, and we // current have the "access lock" we need to relinquish access to // this sleeping task. // // To do so, we first drop out homing missile and we then reawaken // the task. In reawakening the task, it will be immediately // scheduled on this scheduler. Because we might be woken up on some // other scheduler, we drop our homing missile before we reawaken // the task. Some((task, _)) => { drop(self.missile.take()); task.reawaken(); } None => { inner.held = false; } } } } #[unsafe_destructor] impl<T> Drop for Inner<T> { fn drop(&mut self) { assert!(!self.held); assert_eq!(self.queue.len(), 0); } }
get_mut
identifier_name
access.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// An exclusive access primitive /// /// This primitive is used to gain exclusive access to read() and write() in uv. /// It is assumed that all invocations of this struct happen on the same thread /// (the uv event loop). use alloc::arc::Arc; use std::mem; use std::rt::local::Local; use std::rt::task::{BlockedTask, Task}; use std::cell::UnsafeCell; use homing::HomingMissile; pub struct Access<T> { inner: Arc<UnsafeCell<Inner<T>>>, } pub struct Guard<'a, T:'static> { access: &'a mut Access<T>, missile: Option<HomingMissile>, } struct Inner<T> { queue: Vec<(BlockedTask, uint)>, held: bool, closed: bool, data: T, } impl<T: Send> Access<T> { pub fn new(data: T) -> Access<T> { Access { inner: Arc::new(UnsafeCell::new(Inner {
} } pub fn grant<'a>(&'a mut self, token: uint, missile: HomingMissile) -> Guard<'a, T> { // This unsafety is actually OK because the homing missile argument // guarantees that we're on the same event loop as all the other objects // attempting to get access granted. let inner = unsafe { &mut *self.inner.get() }; if inner.held { let t: Box<Task> = Local::take(); t.deschedule(1, |task| { inner.queue.push((task, token)); Ok(()) }); assert!(inner.held); } else { inner.held = true; } Guard { access: self, missile: Some(missile) } } pub fn unsafe_get(&self) -> *mut T { unsafe { &mut (*self.inner.get()).data as *mut _ } } // Safe version which requires proof that you are on the home scheduler. pub fn get_mut<'a>(&'a mut self, _missile: &HomingMissile) -> &'a mut T { unsafe { &mut *self.unsafe_get() } } pub fn close(&self, _missile: &HomingMissile) { // This unsafety is OK because with a homing missile we're guaranteed to // be the only task looking at the `closed` flag (and are therefore // allowed to modify it). Additionally, no atomics are necessary because // everyone's running on the same thread and has already done the // necessary synchronization to be running on this thread. unsafe { (*self.inner.get()).closed = true; } } // Dequeue a blocked task with a specified token. This is unsafe because it // is only safe to invoke while on the home event loop, and there is no // guarantee that this i being invoked on the home event loop. pub unsafe fn dequeue(&mut self, token: uint) -> Option<BlockedTask> { let inner = &mut *self.inner.get(); match inner.queue.iter().position(|&(_, t)| t == token) { Some(i) => Some(inner.queue.remove(i).unwrap().val0()), None => None, } } /// Test whether this access is closed, using a homing missile to prove /// that it's safe pub fn is_closed(&self, _missile: &HomingMissile) -> bool { unsafe { (*self.inner.get()).closed } } } impl<T: Send> Clone for Access<T> { fn clone(&self) -> Access<T> { Access { inner: self.inner.clone() } } } impl<'a, T: Send> Guard<'a, T> { pub fn is_closed(&self) -> bool { // See above for why this unsafety is ok, it just applies to the read // instead of the write. unsafe { (*self.access.inner.get()).closed } } } impl<'a, T: Send> Deref<T> for Guard<'a, T> { fn deref<'a>(&'a self) -> &'a T { // A guard represents exclusive access to a piece of data, so it's safe // to hand out shared and mutable references unsafe { &(*self.access.inner.get()).data } } } impl<'a, T: Send> DerefMut<T> for Guard<'a, T> { fn deref_mut<'a>(&'a mut self) -> &'a mut T { unsafe { &mut (*self.access.inner.get()).data } } } #[unsafe_destructor] impl<'a, T> Drop for Guard<'a, T> { fn drop(&mut self) { // This guard's homing missile is still armed, so we're guaranteed to be // on the same I/O event loop, so this unsafety should be ok. assert!(self.missile.is_some()); let inner: &mut Inner<T> = unsafe { mem::transmute(self.access.inner.get()) }; match inner.queue.remove(0) { // Here we have found a task that was waiting for access, and we // current have the "access lock" we need to relinquish access to // this sleeping task. // // To do so, we first drop out homing missile and we then reawaken // the task. In reawakening the task, it will be immediately // scheduled on this scheduler. Because we might be woken up on some // other scheduler, we drop our homing missile before we reawaken // the task. 
Some((task, _)) => { drop(self.missile.take()); task.reawaken(); } None => { inner.held = false; } } } } #[unsafe_destructor] impl<T> Drop for Inner<T> { fn drop(&mut self) { assert!(!self.held); assert_eq!(self.queue.len(), 0); } }
queue: vec![], held: false, closed: false, data: data, }))
random_line_split
access.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /// An exclusive access primitive /// /// This primitive is used to gain exclusive access to read() and write() in uv. /// It is assumed that all invocations of this struct happen on the same thread /// (the uv event loop). use alloc::arc::Arc; use std::mem; use std::rt::local::Local; use std::rt::task::{BlockedTask, Task}; use std::cell::UnsafeCell; use homing::HomingMissile; pub struct Access<T> { inner: Arc<UnsafeCell<Inner<T>>>, } pub struct Guard<'a, T:'static> { access: &'a mut Access<T>, missile: Option<HomingMissile>, } struct Inner<T> { queue: Vec<(BlockedTask, uint)>, held: bool, closed: bool, data: T, } impl<T: Send> Access<T> { pub fn new(data: T) -> Access<T> { Access { inner: Arc::new(UnsafeCell::new(Inner { queue: vec![], held: false, closed: false, data: data, })) } } pub fn grant<'a>(&'a mut self, token: uint, missile: HomingMissile) -> Guard<'a, T> { // This unsafety is actually OK because the homing missile argument // guarantees that we're on the same event loop as all the other objects // attempting to get access granted. let inner = unsafe { &mut *self.inner.get() }; if inner.held { let t: Box<Task> = Local::take(); t.deschedule(1, |task| { inner.queue.push((task, token)); Ok(()) }); assert!(inner.held); } else { inner.held = true; } Guard { access: self, missile: Some(missile) } } pub fn unsafe_get(&self) -> *mut T { unsafe { &mut (*self.inner.get()).data as *mut _ } } // Safe version which requires proof that you are on the home scheduler. pub fn get_mut<'a>(&'a mut self, _missile: &HomingMissile) -> &'a mut T { unsafe { &mut *self.unsafe_get() } } pub fn close(&self, _missile: &HomingMissile) { // This unsafety is OK because with a homing missile we're guaranteed to // be the only task looking at the `closed` flag (and are therefore // allowed to modify it). Additionally, no atomics are necessary because // everyone's running on the same thread and has already done the // necessary synchronization to be running on this thread. unsafe { (*self.inner.get()).closed = true; } } // Dequeue a blocked task with a specified token. This is unsafe because it // is only safe to invoke while on the home event loop, and there is no // guarantee that this i being invoked on the home event loop. pub unsafe fn dequeue(&mut self, token: uint) -> Option<BlockedTask> { let inner = &mut *self.inner.get(); match inner.queue.iter().position(|&(_, t)| t == token) { Some(i) => Some(inner.queue.remove(i).unwrap().val0()), None => None, } } /// Test whether this access is closed, using a homing missile to prove /// that it's safe pub fn is_closed(&self, _missile: &HomingMissile) -> bool { unsafe { (*self.inner.get()).closed } } } impl<T: Send> Clone for Access<T> { fn clone(&self) -> Access<T> { Access { inner: self.inner.clone() } } } impl<'a, T: Send> Guard<'a, T> { pub fn is_closed(&self) -> bool { // See above for why this unsafety is ok, it just applies to the read // instead of the write. 
unsafe { (*self.access.inner.get()).closed } } } impl<'a, T: Send> Deref<T> for Guard<'a, T> { fn deref<'a>(&'a self) -> &'a T { // A guard represents exclusive access to a piece of data, so it's safe // to hand out shared and mutable references unsafe { &(*self.access.inner.get()).data } } } impl<'a, T: Send> DerefMut<T> for Guard<'a, T> { fn deref_mut<'a>(&'a mut self) -> &'a mut T
} #[unsafe_destructor] impl<'a, T> Drop for Guard<'a, T> { fn drop(&mut self) { // This guard's homing missile is still armed, so we're guaranteed to be // on the same I/O event loop, so this unsafety should be ok. assert!(self.missile.is_some()); let inner: &mut Inner<T> = unsafe { mem::transmute(self.access.inner.get()) }; match inner.queue.remove(0) { // Here we have found a task that was waiting for access, and we // current have the "access lock" we need to relinquish access to // this sleeping task. // // To do so, we first drop out homing missile and we then reawaken // the task. In reawakening the task, it will be immediately // scheduled on this scheduler. Because we might be woken up on some // other scheduler, we drop our homing missile before we reawaken // the task. Some((task, _)) => { drop(self.missile.take()); task.reawaken(); } None => { inner.held = false; } } } } #[unsafe_destructor] impl<T> Drop for Inner<T> { fn drop(&mut self) { assert!(!self.held); assert_eq!(self.queue.len(), 0); } }
{ unsafe { &mut (*self.access.inner.get()).data } }
identifier_body
path_utils.rs
use crate::borrow_check::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation}; use crate::borrow_check::places_conflict; use crate::borrow_check::AccessDepth; use crate::borrow_check::Upvar; use crate::dataflow::indexes::BorrowIndex; use rustc_data_structures::graph::dominators::Dominators; use rustc_middle::mir::BorrowKind; use rustc_middle::mir::{BasicBlock, Body, Field, Location, Place, PlaceRef, ProjectionElem}; use rustc_middle::ty::TyCtxt; /// Returns `true` if the borrow represented by `kind` is /// allowed to be split into separate Reservation and /// Activation phases. pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool { kind.allows_two_phase_borrow() } /// Control for the path borrow checking code #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub(super) enum Control { Continue, Break, } /// Encapsulates the idea of iterating over every borrow that involves a particular path pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( s: &mut S, tcx: TyCtxt<'tcx>, body: &Body<'tcx>, _location: Location, access_place: (AccessDepth, Place<'tcx>), borrow_set: &BorrowSet<'tcx>, candidates: I, mut op: F, ) where F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control, I: Iterator<Item = BorrowIndex>, { let (access, place) = access_place; // FIXME: analogous code in check_loans first maps `place` to // its base_path. // check for loan restricting path P being used. Accounts for // borrows of P, P.a.b, etc. for i in candidates { let borrowed = &borrow_set[i]; if places_conflict::borrow_conflicts_with_place( tcx, body, borrowed.borrowed_place, borrowed.kind, place.as_ref(), access, places_conflict::PlaceConflictBias::Overlap, ) { debug!( "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}", i, borrowed, place, access ); let ctrl = op(s, i, borrowed); if ctrl == Control::Break { return; } } } } pub(super) fn is_active<'tcx>( dominators: &Dominators<BasicBlock>, borrow_data: &BorrowData<'tcx>, location: Location, ) -> bool { debug!("is_active(borrow_data={:?}, location={:?})", borrow_data, location); let activation_location = match borrow_data.activation_location { // If this is not a 2-phase borrow, it is always active. TwoPhaseActivation::NotTwoPhase => return true, // And if the unique 2-phase use is not an activation, then it is *never* active. TwoPhaseActivation::NotActivated => return false, // Otherwise, we derive info from the activation point `loc`: TwoPhaseActivation::ActivatedAt(loc) => loc, }; // Otherwise, it is active for every location *except* in between // the reservation and the activation: // // X // / // R <--+ Except for this // / \ | diamond // \ / | // A <------+ // | // Z // // Note that we assume that: // - the reservation R dominates the activation A // - the activation A post-dominates the reservation R (ignoring unwinding edges). // // This means that there can't be an edge that leaves A and // comes back into that diamond unless it passes through R. // // Suboptimal: In some cases, this code walks the dominator // tree twice when it only has to be walked once. I am // lazy. -nmatsakis // If dominated by the activation A, then it is active. The // activation occurs upon entering the point A, so this is // also true if location == activation_location. if activation_location.dominates(location, dominators) { return true; } // The reservation starts *on exiting* the reservation block, // so check if the location is dominated by R.successor. If so, // this point falls in between the reservation and location. 
let reserve_location = borrow_data.reserve_location.successor_within_block(); if reserve_location.dominates(location, dominators) { false } else { // Otherwise, this point is outside the diamond, so // consider the borrow active. This could happen for // example if the borrow remains active around a loop (in // which case it would be active also for the point R, // which would generate an error). true } } /// Determines if a given borrow is borrowing local data /// This is called for all Yield expressions on movable generators pub(super) fn
(place: Place<'_>) -> bool { // Reborrow of already borrowed data is ignored // Any errors will be caught on the initial borrow !place.is_indirect() } /// If `place` is a field projection, and the field is being projected from a closure type, /// then returns the index of the field being projected. Note that this closure will always /// be `self` in the current MIR, because that is the only time we directly access the fields /// of a closure type. pub(crate) fn is_upvar_field_projection( tcx: TyCtxt<'tcx>, upvars: &[Upvar<'tcx>], place_ref: PlaceRef<'tcx>, body: &Body<'tcx>, ) -> Option<Field> { let mut place_ref = place_ref; let mut by_ref = false; if let Some((place_base, ProjectionElem::Deref)) = place_ref.last_projection() { place_ref = place_base; by_ref = true; } match place_ref.last_projection() { Some((place_base, ProjectionElem::Field(field, _ty))) => { let base_ty = place_base.ty(body, tcx).ty; if (base_ty.is_closure() || base_ty.is_generator()) && (!by_ref || upvars[field.index()].by_ref) { Some(field) } else { None } } _ => None, } }
borrow_of_local_data
identifier_name
path_utils.rs
use crate::borrow_check::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation}; use crate::borrow_check::places_conflict; use crate::borrow_check::AccessDepth; use crate::borrow_check::Upvar; use crate::dataflow::indexes::BorrowIndex; use rustc_data_structures::graph::dominators::Dominators; use rustc_middle::mir::BorrowKind; use rustc_middle::mir::{BasicBlock, Body, Field, Location, Place, PlaceRef, ProjectionElem}; use rustc_middle::ty::TyCtxt; /// Returns `true` if the borrow represented by `kind` is /// allowed to be split into separate Reservation and /// Activation phases. pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool { kind.allows_two_phase_borrow() } /// Control for the path borrow checking code #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub(super) enum Control { Continue, Break, } /// Encapsulates the idea of iterating over every borrow that involves a particular path pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( s: &mut S, tcx: TyCtxt<'tcx>, body: &Body<'tcx>, _location: Location, access_place: (AccessDepth, Place<'tcx>), borrow_set: &BorrowSet<'tcx>, candidates: I, mut op: F, ) where F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control, I: Iterator<Item = BorrowIndex>, { let (access, place) = access_place; // FIXME: analogous code in check_loans first maps `place` to // its base_path. // check for loan restricting path P being used. Accounts for // borrows of P, P.a.b, etc. for i in candidates { let borrowed = &borrow_set[i]; if places_conflict::borrow_conflicts_with_place( tcx, body, borrowed.borrowed_place, borrowed.kind, place.as_ref(), access, places_conflict::PlaceConflictBias::Overlap, ) { debug!( "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}", i, borrowed, place, access ); let ctrl = op(s, i, borrowed); if ctrl == Control::Break { return; } } } } pub(super) fn is_active<'tcx>( dominators: &Dominators<BasicBlock>, borrow_data: &BorrowData<'tcx>, location: Location, ) -> bool { debug!("is_active(borrow_data={:?}, location={:?})", borrow_data, location); let activation_location = match borrow_data.activation_location { // If this is not a 2-phase borrow, it is always active. TwoPhaseActivation::NotTwoPhase => return true, // And if the unique 2-phase use is not an activation, then it is *never* active. TwoPhaseActivation::NotActivated => return false, // Otherwise, we derive info from the activation point `loc`: TwoPhaseActivation::ActivatedAt(loc) => loc, };
// // X // / // R <--+ Except for this // / \ | diamond // \ / | // A <------+ // | // Z // // Note that we assume that: // - the reservation R dominates the activation A // - the activation A post-dominates the reservation R (ignoring unwinding edges). // // This means that there can't be an edge that leaves A and // comes back into that diamond unless it passes through R. // // Suboptimal: In some cases, this code walks the dominator // tree twice when it only has to be walked once. I am // lazy. -nmatsakis // If dominated by the activation A, then it is active. The // activation occurs upon entering the point A, so this is // also true if location == activation_location. if activation_location.dominates(location, dominators) { return true; } // The reservation starts *on exiting* the reservation block, // so check if the location is dominated by R.successor. If so, // this point falls in between the reservation and location. let reserve_location = borrow_data.reserve_location.successor_within_block(); if reserve_location.dominates(location, dominators) { false } else { // Otherwise, this point is outside the diamond, so // consider the borrow active. This could happen for // example if the borrow remains active around a loop (in // which case it would be active also for the point R, // which would generate an error). true } } /// Determines if a given borrow is borrowing local data /// This is called for all Yield expressions on movable generators pub(super) fn borrow_of_local_data(place: Place<'_>) -> bool { // Reborrow of already borrowed data is ignored // Any errors will be caught on the initial borrow !place.is_indirect() } /// If `place` is a field projection, and the field is being projected from a closure type, /// then returns the index of the field being projected. Note that this closure will always /// be `self` in the current MIR, because that is the only time we directly access the fields /// of a closure type. pub(crate) fn is_upvar_field_projection( tcx: TyCtxt<'tcx>, upvars: &[Upvar<'tcx>], place_ref: PlaceRef<'tcx>, body: &Body<'tcx>, ) -> Option<Field> { let mut place_ref = place_ref; let mut by_ref = false; if let Some((place_base, ProjectionElem::Deref)) = place_ref.last_projection() { place_ref = place_base; by_ref = true; } match place_ref.last_projection() { Some((place_base, ProjectionElem::Field(field, _ty))) => { let base_ty = place_base.ty(body, tcx).ty; if (base_ty.is_closure() || base_ty.is_generator()) && (!by_ref || upvars[field.index()].by_ref) { Some(field) } else { None } } _ => None, } }
// Otherwise, it is active for every location *except* in between // the reservation and the activation:
random_line_split
path_utils.rs
use crate::borrow_check::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation}; use crate::borrow_check::places_conflict; use crate::borrow_check::AccessDepth; use crate::borrow_check::Upvar; use crate::dataflow::indexes::BorrowIndex; use rustc_data_structures::graph::dominators::Dominators; use rustc_middle::mir::BorrowKind; use rustc_middle::mir::{BasicBlock, Body, Field, Location, Place, PlaceRef, ProjectionElem}; use rustc_middle::ty::TyCtxt; /// Returns `true` if the borrow represented by `kind` is /// allowed to be split into separate Reservation and /// Activation phases. pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool { kind.allows_two_phase_borrow() } /// Control for the path borrow checking code #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub(super) enum Control { Continue, Break, } /// Encapsulates the idea of iterating over every borrow that involves a particular path pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( s: &mut S, tcx: TyCtxt<'tcx>, body: &Body<'tcx>, _location: Location, access_place: (AccessDepth, Place<'tcx>), borrow_set: &BorrowSet<'tcx>, candidates: I, mut op: F, ) where F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control, I: Iterator<Item = BorrowIndex>, { let (access, place) = access_place; // FIXME: analogous code in check_loans first maps `place` to // its base_path. // check for loan restricting path P being used. Accounts for // borrows of P, P.a.b, etc. for i in candidates { let borrowed = &borrow_set[i]; if places_conflict::borrow_conflicts_with_place( tcx, body, borrowed.borrowed_place, borrowed.kind, place.as_ref(), access, places_conflict::PlaceConflictBias::Overlap, ) { debug!( "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}", i, borrowed, place, access ); let ctrl = op(s, i, borrowed); if ctrl == Control::Break { return; } } } } pub(super) fn is_active<'tcx>( dominators: &Dominators<BasicBlock>, borrow_data: &BorrowData<'tcx>, location: Location, ) -> bool { debug!("is_active(borrow_data={:?}, location={:?})", borrow_data, location); let activation_location = match borrow_data.activation_location { // If this is not a 2-phase borrow, it is always active. TwoPhaseActivation::NotTwoPhase => return true, // And if the unique 2-phase use is not an activation, then it is *never* active. TwoPhaseActivation::NotActivated => return false, // Otherwise, we derive info from the activation point `loc`: TwoPhaseActivation::ActivatedAt(loc) => loc, }; // Otherwise, it is active for every location *except* in between // the reservation and the activation: // // X // / // R <--+ Except for this // / \ | diamond // \ / | // A <------+ // | // Z // // Note that we assume that: // - the reservation R dominates the activation A // - the activation A post-dominates the reservation R (ignoring unwinding edges). // // This means that there can't be an edge that leaves A and // comes back into that diamond unless it passes through R. // // Suboptimal: In some cases, this code walks the dominator // tree twice when it only has to be walked once. I am // lazy. -nmatsakis // If dominated by the activation A, then it is active. The // activation occurs upon entering the point A, so this is // also true if location == activation_location. if activation_location.dominates(location, dominators) { return true; } // The reservation starts *on exiting* the reservation block, // so check if the location is dominated by R.successor. If so, // this point falls in between the reservation and location. 
let reserve_location = borrow_data.reserve_location.successor_within_block(); if reserve_location.dominates(location, dominators)
else { // Otherwise, this point is outside the diamond, so // consider the borrow active. This could happen for // example if the borrow remains active around a loop (in // which case it would be active also for the point R, // which would generate an error). true } } /// Determines if a given borrow is borrowing local data /// This is called for all Yield expressions on movable generators pub(super) fn borrow_of_local_data(place: Place<'_>) -> bool { // Reborrow of already borrowed data is ignored // Any errors will be caught on the initial borrow !place.is_indirect() } /// If `place` is a field projection, and the field is being projected from a closure type, /// then returns the index of the field being projected. Note that this closure will always /// be `self` in the current MIR, because that is the only time we directly access the fields /// of a closure type. pub(crate) fn is_upvar_field_projection( tcx: TyCtxt<'tcx>, upvars: &[Upvar<'tcx>], place_ref: PlaceRef<'tcx>, body: &Body<'tcx>, ) -> Option<Field> { let mut place_ref = place_ref; let mut by_ref = false; if let Some((place_base, ProjectionElem::Deref)) = place_ref.last_projection() { place_ref = place_base; by_ref = true; } match place_ref.last_projection() { Some((place_base, ProjectionElem::Field(field, _ty))) => { let base_ty = place_base.ty(body, tcx).ty; if (base_ty.is_closure() || base_ty.is_generator()) && (!by_ref || upvars[field.index()].by_ref) { Some(field) } else { None } } _ => None, } }
{ false }
conditional_block
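The `is_active` records above encode the two-phase-borrow rule: a borrow is inactive only strictly between its reservation and its activation, and both checks are dominator queries. The standalone sketch below reproduces that decision with invented stand-ins (`Loc`, `Dominators`, `toy_is_active`); it is not the rustc_data_structures / rustc_middle API, just enough structure to exercise the rule.

// Invented stand-ins for rustc's Location and Dominators (this is not the
// rustc_data_structures API, only enough structure to exercise the rule).
#[derive(Clone, Copy)]
struct Loc(u32);

struct Dominators {
    // idom[n] = immediate dominator of node n; the root is its own idom.
    idom: Vec<u32>,
}

impl Dominators {
    // `a` dominates `b` if walking b's idom chain reaches a (reflexive).
    fn dominates(&self, a: Loc, b: Loc) -> bool {
        let mut n = b.0;
        loop {
            if n == a.0 {
                return true;
            }
            let parent = self.idom[n as usize];
            if parent == n {
                return false; // reached the root without meeting `a`
            }
            n = parent;
        }
    }
}

// Mirrors the shape of `is_active`: a two-phase borrow is active everywhere
// except strictly between its reservation and its activation.
fn toy_is_active(dom: &Dominators, reservation_succ: Loc, activation: Loc, loc: Loc) -> bool {
    if dom.dominates(activation, loc) {
        true // at or after the activation point
    } else if dom.dominates(reservation_succ, loc) {
        false // inside the reservation/activation "diamond"
    } else {
        true // outside the diamond, e.g. reached around a loop back-edge
    }
}

fn main() {
    // Straight-line CFG 0 -> 1 -> 2 -> 3; the reservation exits node 0
    // (so its successor is node 1) and the activation sits at node 2.
    let dom = Dominators { idom: vec![0, 0, 1, 2] };
    assert!(!toy_is_active(&dom, Loc(1), Loc(2), Loc(1))); // between R and A: inactive
    assert!(toy_is_active(&dom, Loc(1), Loc(2), Loc(3))); // after A: active
}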
cfg-macros-notfoo.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast compile-flags directive doesn't work for check-fast // compile-flags: // check that cfg correctly chooses between the macro impls (see also // cfg-macros-foo.rs) #[feature(macro_rules)]; #[cfg(foo)] #[macro_escape] mod foo { macro_rules! bar { () => { true } } } #[cfg(not(foo))] #[macro_escape] mod foo { macro_rules! bar { () => { false } } } pub fn
() { assert!(!bar!()) }
main
identifier_name
cfg-macros-notfoo.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast compile-flags directive doesn't work for check-fast // compile-flags: // check that cfg correctly chooses between the macro impls (see also // cfg-macros-foo.rs) #[feature(macro_rules)]; #[cfg(foo)] #[macro_escape] mod foo { macro_rules! bar { () => { true } } } #[cfg(not(foo))] #[macro_escape] mod foo { macro_rules! bar { () => { false }
}
} } pub fn main() { assert!(!bar!())
random_line_split
cfg-macros-notfoo.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast compile-flags directive doesn't work for check-fast // compile-flags: // check that cfg correctly chooses between the macro impls (see also // cfg-macros-foo.rs) #[feature(macro_rules)]; #[cfg(foo)] #[macro_escape] mod foo { macro_rules! bar { () => { true } } } #[cfg(not(foo))] #[macro_escape] mod foo { macro_rules! bar { () => { false } } } pub fn main()
{ assert!(!bar!()) }
identifier_body
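The test file above relies on `#[cfg]` choosing between two `macro_rules!` definitions of the same macro. The old `#[feature(macro_rules)]` / `#[macro_escape]` attributes are long gone, but the same selection still works in current Rust; a minimal standalone sketch, assuming the program is built without `--cfg foo`:

// Two definitions of the same macro; #[cfg] keeps exactly one of them.
// Build normally (without `--cfg foo`) and the second definition wins.
#[cfg(foo)]
macro_rules! bar {
    () => {
        true
    };
}

#[cfg(not(foo))]
macro_rules! bar {
    () => {
        false
    };
}

fn main() {
    assert!(!bar!());
}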
domrectlist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::DOMRectListBinding; use dom::bindings::codegen::Bindings::DOMRectListBinding::DOMRectListMethods; use dom::bindings::global; use dom::bindings::js::{JS, JSRef, Temporary}; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::domrect::DOMRect; use dom::window::Window; #[dom_struct] pub struct DOMRectList { reflector_: Reflector, rects: Vec<JS<DOMRect>>, window: JS<Window>, } impl DOMRectList { fn new_inherited(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> DOMRectList { let rects = rects.iter().map(|rect| JS::from_rooted(*rect)).collect(); DOMRectList { reflector_: Reflector::new(), rects: rects, window: JS::from_rooted(window), } } pub fn new(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> Temporary<DOMRectList> { reflect_dom_object(box DOMRectList::new_inherited(window, rects), &global::Window(window), DOMRectListBinding::Wrap) } } impl<'a> DOMRectListMethods for JSRef<'a, DOMRectList> { fn Length(self) -> u32 { self.rects.len() as u32 } fn Item(self, index: u32) -> Option<Temporary<DOMRect>> { let rects = &self.rects; if index < rects.len() as u32 { Some(Temporary::new(rects[index as uint].clone())) } else { None } }
} impl Reflectable for DOMRectList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } }
fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<Temporary<DOMRect>> { *found = index < self.rects.len() as u32; self.Item(index) }
random_line_split
domrectlist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::DOMRectListBinding; use dom::bindings::codegen::Bindings::DOMRectListBinding::DOMRectListMethods; use dom::bindings::global; use dom::bindings::js::{JS, JSRef, Temporary}; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::domrect::DOMRect; use dom::window::Window; #[dom_struct] pub struct DOMRectList { reflector_: Reflector, rects: Vec<JS<DOMRect>>, window: JS<Window>, } impl DOMRectList { fn new_inherited(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> DOMRectList { let rects = rects.iter().map(|rect| JS::from_rooted(*rect)).collect(); DOMRectList { reflector_: Reflector::new(), rects: rects, window: JS::from_rooted(window), } } pub fn new(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> Temporary<DOMRectList> { reflect_dom_object(box DOMRectList::new_inherited(window, rects), &global::Window(window), DOMRectListBinding::Wrap) } } impl<'a> DOMRectListMethods for JSRef<'a, DOMRectList> { fn Length(self) -> u32 { self.rects.len() as u32 } fn Item(self, index: u32) -> Option<Temporary<DOMRect>> { let rects = &self.rects; if index < rects.len() as u32 { Some(Temporary::new(rects[index as uint].clone())) } else { None } } fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<Temporary<DOMRect>>
} impl Reflectable for DOMRectList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } }
{ *found = index < self.rects.len() as u32; self.Item(index) }
identifier_body
domrectlist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::DOMRectListBinding; use dom::bindings::codegen::Bindings::DOMRectListBinding::DOMRectListMethods; use dom::bindings::global; use dom::bindings::js::{JS, JSRef, Temporary}; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::domrect::DOMRect; use dom::window::Window; #[dom_struct] pub struct DOMRectList { reflector_: Reflector, rects: Vec<JS<DOMRect>>, window: JS<Window>, } impl DOMRectList { fn
(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> DOMRectList { let rects = rects.iter().map(|rect| JS::from_rooted(*rect)).collect(); DOMRectList { reflector_: Reflector::new(), rects: rects, window: JS::from_rooted(window), } } pub fn new(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> Temporary<DOMRectList> { reflect_dom_object(box DOMRectList::new_inherited(window, rects), &global::Window(window), DOMRectListBinding::Wrap) } } impl<'a> DOMRectListMethods for JSRef<'a, DOMRectList> { fn Length(self) -> u32 { self.rects.len() as u32 } fn Item(self, index: u32) -> Option<Temporary<DOMRect>> { let rects = &self.rects; if index < rects.len() as u32 { Some(Temporary::new(rects[index as uint].clone())) } else { None } } fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<Temporary<DOMRect>> { *found = index < self.rects.len() as u32; self.Item(index) } } impl Reflectable for DOMRectList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } }
new_inherited
identifier_name
domrectlist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::DOMRectListBinding; use dom::bindings::codegen::Bindings::DOMRectListBinding::DOMRectListMethods; use dom::bindings::global; use dom::bindings::js::{JS, JSRef, Temporary}; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::domrect::DOMRect; use dom::window::Window; #[dom_struct] pub struct DOMRectList { reflector_: Reflector, rects: Vec<JS<DOMRect>>, window: JS<Window>, } impl DOMRectList { fn new_inherited(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> DOMRectList { let rects = rects.iter().map(|rect| JS::from_rooted(*rect)).collect(); DOMRectList { reflector_: Reflector::new(), rects: rects, window: JS::from_rooted(window), } } pub fn new(window: JSRef<Window>, rects: Vec<JSRef<DOMRect>>) -> Temporary<DOMRectList> { reflect_dom_object(box DOMRectList::new_inherited(window, rects), &global::Window(window), DOMRectListBinding::Wrap) } } impl<'a> DOMRectListMethods for JSRef<'a, DOMRectList> { fn Length(self) -> u32 { self.rects.len() as u32 } fn Item(self, index: u32) -> Option<Temporary<DOMRect>> { let rects = &self.rects; if index < rects.len() as u32
else { None } } fn IndexedGetter(self, index: u32, found: &mut bool) -> Option<Temporary<DOMRect>> { *found = index < self.rects.len() as u32; self.Item(index) } } impl Reflectable for DOMRectList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } }
{ Some(Temporary::new(rects[index as uint].clone())) }
conditional_block
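`Length`/`Item`/`IndexedGetter` above follow the usual WebIDL indexed-getter shape: report a length, return `None` for out-of-range indices, and have the getter flag whether the index was found. A plain-Rust sketch of the same bounds check, using an invented `RectList` in place of the Servo DOM types:

// Invented stand-in for an indexed DOM collection (not the Servo types).
struct RectList {
    rects: Vec<(f64, f64, f64, f64)>, // (x, y, width, height)
}

impl RectList {
    fn length(&self) -> u32 {
        self.rects.len() as u32
    }

    // Counterpart of `Item`: bounds-checked access, `None` past the end.
    fn item(&self, index: u32) -> Option<&(f64, f64, f64, f64)> {
        self.rects.get(index as usize)
    }

    // Counterpart of `IndexedGetter`: also reports whether the index was in range.
    fn indexed_getter(&self, index: u32, found: &mut bool) -> Option<&(f64, f64, f64, f64)> {
        *found = index < self.length();
        self.item(index)
    }
}

fn main() {
    let list = RectList { rects: vec![(0.0, 0.0, 10.0, 20.0)] };
    let mut found = false;
    assert!(list.indexed_getter(0, &mut found).is_some() && found);
    assert!(list.indexed_getter(1, &mut found).is_none() && !found);
}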
normalized.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ access::ModuleAccess, file_format::{ AbilitySet, CompiledModule, FieldDefinition, FunctionHandle, SignatureToken, StructDefinition, StructFieldInformation, TypeParameterIndex, Visibility, }, }; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, language_storage::{ModuleId, StructTag, TypeTag}, }; use std::collections::BTreeMap; /// Defines normalized representations of Move types, fields, kinds, structs, functions, and /// modules. These representations are useful in situations that require require comparing /// functions, resources, and types across modules. This arises in linking, compatibility checks /// (e.g., "is it safe to deploy this new module without updating its dependents and/or restarting /// genesis?"), defining schemas for resources stored on-chain, and (possibly in the future) /// allowing module updates transactions. /// A normalized version of `SignatureToken`, a type expression appearing in struct or function /// declarations. Unlike `SignatureToken`s, `normalized::Type`s from different modules can safely be /// compared. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub enum Type { Bool, U8, U64, U128, Address, Signer, Struct { address: AccountAddress, module: Identifier, name: Identifier, type_arguments: Vec<Type>, }, Vector(Box<Type>), TypeParameter(TypeParameterIndex), Reference(Box<Type>), MutableReference(Box<Type>), } /// Normalized version of a `FieldDefinition`. The `name` is included even though it is /// metadata that it is ignored by the VM. The reason: names are important to clients. We would /// want a change from `Account { bal: u64, seq: u64 }` to `Account { seq: u64, bal: u64 }` to be /// marked as incompatible. Not safe to compare without an enclosing `Struct`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Field { pub name: Identifier, pub type_: Type, } /// Normalized version of a `StructDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Struct { pub name: Identifier, pub abilities: AbilitySet, pub type_parameters: Vec<AbilitySet>, pub fields: Vec<Field>, } /// Normalized version of a `FunctionDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct FunctionSignature { pub name: Identifier, pub type_parameters: Vec<AbilitySet>, pub formals: Vec<Type>, pub ret: Vec<Type>, } /// Normalized version of a `CompiledModule`: its address, name, struct declarations, and public /// function declarations. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Module { pub address: AccountAddress, pub name: Identifier, pub friends: Vec<ModuleId>, pub structs: Vec<Struct>, pub exposed_functions: BTreeMap<FunctionSignature, Visibility>, } impl Module { /// Extract a normalized module from a `CompiledModule`. The module `m` should be verified. /// Nothing will break here if that is not the case, but there is little point in computing a /// normalized representation of a module that won't verify (since it can't be published). 
pub fn new(m: &CompiledModule) -> Self { let friends = m.immediate_friends(); let structs = m.struct_defs().iter().map(|d| Struct::new(m, d)).collect(); let exposed_functions = m .function_defs() .iter() .filter(|func_def| match func_def.visibility { Visibility::Public | Visibility::Script | Visibility::Friend => true, Visibility::Private => false, }) .map(|func_def| { ( FunctionSignature::new(m, m.function_handle_at(func_def.function)), func_def.visibility, ) }) .collect(); Self { address: *m.address(), name: m.name().to_owned(), friends, structs, exposed_functions, } } pub fn module_id(&self) -> ModuleId { ModuleId::new(self.address, self.name.clone()) } } impl Type { /// Create a normalized `Type` for `SignatureToken` `s` in module `m`. pub fn new(m: &CompiledModule, s: &SignatureToken) -> Self { use SignatureToken::*; match s { Struct(shi) => { let s_handle = m.struct_handle_at(*shi); assert!(s_handle.type_parameters.is_empty(), "A struct with N type parameters should be encoded as StructModuleInstantiation with type_arguments = [TypeParameter(1),..., TypeParameter(N)]"); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: Vec::new(), } } StructInstantiation(shi, type_actuals) => { let s_handle = m.struct_handle_at(*shi); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: type_actuals.iter().map(|t| Type::new(m, t)).collect(), } } Bool => Type::Bool, U8 => Type::U8, U64 => Type::U64, U128 => Type::U128, Address => Type::Address, Signer => Type::Signer, Vector(t) => Type::Vector(Box::new(Type::new(m, t))), TypeParameter(i) => Type::TypeParameter(*i), Reference(t) => Type::Reference(Box::new(Type::new(m, t))), MutableReference(t) => Type::MutableReference(Box::new(Type::new(m, t))), }
use Type::*; match self { TypeParameter(_) => false, Bool => true, U8 => true, U64 => true, U128 => true, Address => true, Signer => true, Struct { type_arguments,.. } => type_arguments.iter().all(|t| t.is_closed()), Vector(t) | Reference(t) | MutableReference(t) => t.is_closed(), } } pub fn into_type_tag(self) -> Option<TypeTag> { use Type::*; Some(if self.is_closed() { match self { Reference(_) | MutableReference(_) => return None, Bool => TypeTag::Bool, U8 => TypeTag::U8, U64 => TypeTag::U64, U128 => TypeTag::U128, Address => TypeTag::Address, Signer => TypeTag::Signer, Vector(t) => TypeTag::Vector(Box::new( t.into_type_tag() .expect("Invariant violation: vector type argument contains reference"), )), Struct { address, module, name, type_arguments, } => TypeTag::Struct(StructTag { address, module, name, type_params: type_arguments .into_iter() .map(|t| { t.into_type_tag().expect( "Invariant violation: struct type argument contains reference", ) }) .collect(), }), TypeParameter(_) => unreachable!(), } } else { return None; }) } } impl Field { /// Create a `Field` for `FieldDefinition` `f` in module `m`. pub fn new(m: &CompiledModule, f: &FieldDefinition) -> Self { Field { name: m.identifier_at(f.name).to_owned(), type_: Type::new(m, &f.signature.0), } } } impl Struct { /// Create a `Struct` for `StructDefinition` `def` in module `m`. Panics if `def` is a /// a native struct definition. pub fn new(m: &CompiledModule, def: &StructDefinition) -> Self { let handle = m.struct_handle_at(def.struct_handle); let fields = match &def.field_information { StructFieldInformation::Native => panic!("Can't extract for native struct"), StructFieldInformation::Declared(fields) => { fields.iter().map(|f| Field::new(m, f)).collect() } }; Struct { name: m.identifier_at(handle.name).to_owned(), abilities: handle.abilities, type_parameters: handle.type_parameters.clone(), fields, } } } impl FunctionSignature { /// Create a `FunctionSignature` for `FunctionHandle` `f` in module `m`. pub fn new(m: &CompiledModule, f: &FunctionHandle) -> Self { FunctionSignature { name: m.identifier_at(f.name).to_owned(), type_parameters: f.type_parameters.clone(), formals: m .signature_at(f.parameters) .0 .iter() .map(|s| Type::new(m, s)) .collect(), ret: m .signature_at(f.return_) .0 .iter() .map(|s| Type::new(m, s)) .collect(), } } }
} /// Return true if `self` is a closed type with no free type variables pub fn is_closed(&self) -> bool {
random_line_split
normalized.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ access::ModuleAccess, file_format::{ AbilitySet, CompiledModule, FieldDefinition, FunctionHandle, SignatureToken, StructDefinition, StructFieldInformation, TypeParameterIndex, Visibility, }, }; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, language_storage::{ModuleId, StructTag, TypeTag}, }; use std::collections::BTreeMap; /// Defines normalized representations of Move types, fields, kinds, structs, functions, and /// modules. These representations are useful in situations that require require comparing /// functions, resources, and types across modules. This arises in linking, compatibility checks /// (e.g., "is it safe to deploy this new module without updating its dependents and/or restarting /// genesis?"), defining schemas for resources stored on-chain, and (possibly in the future) /// allowing module updates transactions. /// A normalized version of `SignatureToken`, a type expression appearing in struct or function /// declarations. Unlike `SignatureToken`s, `normalized::Type`s from different modules can safely be /// compared. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub enum Type { Bool, U8, U64, U128, Address, Signer, Struct { address: AccountAddress, module: Identifier, name: Identifier, type_arguments: Vec<Type>, }, Vector(Box<Type>), TypeParameter(TypeParameterIndex), Reference(Box<Type>), MutableReference(Box<Type>), } /// Normalized version of a `FieldDefinition`. The `name` is included even though it is /// metadata that it is ignored by the VM. The reason: names are important to clients. We would /// want a change from `Account { bal: u64, seq: u64 }` to `Account { seq: u64, bal: u64 }` to be /// marked as incompatible. Not safe to compare without an enclosing `Struct`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Field { pub name: Identifier, pub type_: Type, } /// Normalized version of a `StructDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Struct { pub name: Identifier, pub abilities: AbilitySet, pub type_parameters: Vec<AbilitySet>, pub fields: Vec<Field>, } /// Normalized version of a `FunctionDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct FunctionSignature { pub name: Identifier, pub type_parameters: Vec<AbilitySet>, pub formals: Vec<Type>, pub ret: Vec<Type>, } /// Normalized version of a `CompiledModule`: its address, name, struct declarations, and public /// function declarations. #[derive(Clone, Debug, Eq, PartialEq)] pub struct
{ pub address: AccountAddress, pub name: Identifier, pub friends: Vec<ModuleId>, pub structs: Vec<Struct>, pub exposed_functions: BTreeMap<FunctionSignature, Visibility>, } impl Module { /// Extract a normalized module from a `CompiledModule`. The module `m` should be verified. /// Nothing will break here if that is not the case, but there is little point in computing a /// normalized representation of a module that won't verify (since it can't be published). pub fn new(m: &CompiledModule) -> Self { let friends = m.immediate_friends(); let structs = m.struct_defs().iter().map(|d| Struct::new(m, d)).collect(); let exposed_functions = m .function_defs() .iter() .filter(|func_def| match func_def.visibility { Visibility::Public | Visibility::Script | Visibility::Friend => true, Visibility::Private => false, }) .map(|func_def| { ( FunctionSignature::new(m, m.function_handle_at(func_def.function)), func_def.visibility, ) }) .collect(); Self { address: *m.address(), name: m.name().to_owned(), friends, structs, exposed_functions, } } pub fn module_id(&self) -> ModuleId { ModuleId::new(self.address, self.name.clone()) } } impl Type { /// Create a normalized `Type` for `SignatureToken` `s` in module `m`. pub fn new(m: &CompiledModule, s: &SignatureToken) -> Self { use SignatureToken::*; match s { Struct(shi) => { let s_handle = m.struct_handle_at(*shi); assert!(s_handle.type_parameters.is_empty(), "A struct with N type parameters should be encoded as StructModuleInstantiation with type_arguments = [TypeParameter(1),..., TypeParameter(N)]"); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: Vec::new(), } } StructInstantiation(shi, type_actuals) => { let s_handle = m.struct_handle_at(*shi); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: type_actuals.iter().map(|t| Type::new(m, t)).collect(), } } Bool => Type::Bool, U8 => Type::U8, U64 => Type::U64, U128 => Type::U128, Address => Type::Address, Signer => Type::Signer, Vector(t) => Type::Vector(Box::new(Type::new(m, t))), TypeParameter(i) => Type::TypeParameter(*i), Reference(t) => Type::Reference(Box::new(Type::new(m, t))), MutableReference(t) => Type::MutableReference(Box::new(Type::new(m, t))), } } /// Return true if `self` is a closed type with no free type variables pub fn is_closed(&self) -> bool { use Type::*; match self { TypeParameter(_) => false, Bool => true, U8 => true, U64 => true, U128 => true, Address => true, Signer => true, Struct { type_arguments,.. 
} => type_arguments.iter().all(|t| t.is_closed()), Vector(t) | Reference(t) | MutableReference(t) => t.is_closed(), } } pub fn into_type_tag(self) -> Option<TypeTag> { use Type::*; Some(if self.is_closed() { match self { Reference(_) | MutableReference(_) => return None, Bool => TypeTag::Bool, U8 => TypeTag::U8, U64 => TypeTag::U64, U128 => TypeTag::U128, Address => TypeTag::Address, Signer => TypeTag::Signer, Vector(t) => TypeTag::Vector(Box::new( t.into_type_tag() .expect("Invariant violation: vector type argument contains reference"), )), Struct { address, module, name, type_arguments, } => TypeTag::Struct(StructTag { address, module, name, type_params: type_arguments .into_iter() .map(|t| { t.into_type_tag().expect( "Invariant violation: struct type argument contains reference", ) }) .collect(), }), TypeParameter(_) => unreachable!(), } } else { return None; }) } } impl Field { /// Create a `Field` for `FieldDefinition` `f` in module `m`. pub fn new(m: &CompiledModule, f: &FieldDefinition) -> Self { Field { name: m.identifier_at(f.name).to_owned(), type_: Type::new(m, &f.signature.0), } } } impl Struct { /// Create a `Struct` for `StructDefinition` `def` in module `m`. Panics if `def` is a /// a native struct definition. pub fn new(m: &CompiledModule, def: &StructDefinition) -> Self { let handle = m.struct_handle_at(def.struct_handle); let fields = match &def.field_information { StructFieldInformation::Native => panic!("Can't extract for native struct"), StructFieldInformation::Declared(fields) => { fields.iter().map(|f| Field::new(m, f)).collect() } }; Struct { name: m.identifier_at(handle.name).to_owned(), abilities: handle.abilities, type_parameters: handle.type_parameters.clone(), fields, } } } impl FunctionSignature { /// Create a `FunctionSignature` for `FunctionHandle` `f` in module `m`. pub fn new(m: &CompiledModule, f: &FunctionHandle) -> Self { FunctionSignature { name: m.identifier_at(f.name).to_owned(), type_parameters: f.type_parameters.clone(), formals: m .signature_at(f.parameters) .0 .iter() .map(|s| Type::new(m, s)) .collect(), ret: m .signature_at(f.return_) .0 .iter() .map(|s| Type::new(m, s)) .collect(), } } }
Module
identifier_name
normalized.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ access::ModuleAccess, file_format::{ AbilitySet, CompiledModule, FieldDefinition, FunctionHandle, SignatureToken, StructDefinition, StructFieldInformation, TypeParameterIndex, Visibility, }, }; use move_core_types::{ account_address::AccountAddress, identifier::Identifier, language_storage::{ModuleId, StructTag, TypeTag}, }; use std::collections::BTreeMap; /// Defines normalized representations of Move types, fields, kinds, structs, functions, and /// modules. These representations are useful in situations that require require comparing /// functions, resources, and types across modules. This arises in linking, compatibility checks /// (e.g., "is it safe to deploy this new module without updating its dependents and/or restarting /// genesis?"), defining schemas for resources stored on-chain, and (possibly in the future) /// allowing module updates transactions. /// A normalized version of `SignatureToken`, a type expression appearing in struct or function /// declarations. Unlike `SignatureToken`s, `normalized::Type`s from different modules can safely be /// compared. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub enum Type { Bool, U8, U64, U128, Address, Signer, Struct { address: AccountAddress, module: Identifier, name: Identifier, type_arguments: Vec<Type>, }, Vector(Box<Type>), TypeParameter(TypeParameterIndex), Reference(Box<Type>), MutableReference(Box<Type>), } /// Normalized version of a `FieldDefinition`. The `name` is included even though it is /// metadata that it is ignored by the VM. The reason: names are important to clients. We would /// want a change from `Account { bal: u64, seq: u64 }` to `Account { seq: u64, bal: u64 }` to be /// marked as incompatible. Not safe to compare without an enclosing `Struct`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Field { pub name: Identifier, pub type_: Type, } /// Normalized version of a `StructDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Struct { pub name: Identifier, pub abilities: AbilitySet, pub type_parameters: Vec<AbilitySet>, pub fields: Vec<Field>, } /// Normalized version of a `FunctionDefinition`. Not safe to compare without an associated /// `ModuleId` or `Module`. #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct FunctionSignature { pub name: Identifier, pub type_parameters: Vec<AbilitySet>, pub formals: Vec<Type>, pub ret: Vec<Type>, } /// Normalized version of a `CompiledModule`: its address, name, struct declarations, and public /// function declarations. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Module { pub address: AccountAddress, pub name: Identifier, pub friends: Vec<ModuleId>, pub structs: Vec<Struct>, pub exposed_functions: BTreeMap<FunctionSignature, Visibility>, } impl Module { /// Extract a normalized module from a `CompiledModule`. The module `m` should be verified. /// Nothing will break here if that is not the case, but there is little point in computing a /// normalized representation of a module that won't verify (since it can't be published). 
pub fn new(m: &CompiledModule) -> Self { let friends = m.immediate_friends(); let structs = m.struct_defs().iter().map(|d| Struct::new(m, d)).collect(); let exposed_functions = m .function_defs() .iter() .filter(|func_def| match func_def.visibility { Visibility::Public | Visibility::Script | Visibility::Friend => true, Visibility::Private => false, }) .map(|func_def| { ( FunctionSignature::new(m, m.function_handle_at(func_def.function)), func_def.visibility, ) }) .collect(); Self { address: *m.address(), name: m.name().to_owned(), friends, structs, exposed_functions, } } pub fn module_id(&self) -> ModuleId
} impl Type { /// Create a normalized `Type` for `SignatureToken` `s` in module `m`. pub fn new(m: &CompiledModule, s: &SignatureToken) -> Self { use SignatureToken::*; match s { Struct(shi) => { let s_handle = m.struct_handle_at(*shi); assert!(s_handle.type_parameters.is_empty(), "A struct with N type parameters should be encoded as StructModuleInstantiation with type_arguments = [TypeParameter(1),..., TypeParameter(N)]"); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: Vec::new(), } } StructInstantiation(shi, type_actuals) => { let s_handle = m.struct_handle_at(*shi); let m_handle = m.module_handle_at(s_handle.module); Type::Struct { address: *m.address_identifier_at(m_handle.address), module: m.identifier_at(m_handle.name).to_owned(), name: m.identifier_at(s_handle.name).to_owned(), type_arguments: type_actuals.iter().map(|t| Type::new(m, t)).collect(), } } Bool => Type::Bool, U8 => Type::U8, U64 => Type::U64, U128 => Type::U128, Address => Type::Address, Signer => Type::Signer, Vector(t) => Type::Vector(Box::new(Type::new(m, t))), TypeParameter(i) => Type::TypeParameter(*i), Reference(t) => Type::Reference(Box::new(Type::new(m, t))), MutableReference(t) => Type::MutableReference(Box::new(Type::new(m, t))), } } /// Return true if `self` is a closed type with no free type variables pub fn is_closed(&self) -> bool { use Type::*; match self { TypeParameter(_) => false, Bool => true, U8 => true, U64 => true, U128 => true, Address => true, Signer => true, Struct { type_arguments,.. } => type_arguments.iter().all(|t| t.is_closed()), Vector(t) | Reference(t) | MutableReference(t) => t.is_closed(), } } pub fn into_type_tag(self) -> Option<TypeTag> { use Type::*; Some(if self.is_closed() { match self { Reference(_) | MutableReference(_) => return None, Bool => TypeTag::Bool, U8 => TypeTag::U8, U64 => TypeTag::U64, U128 => TypeTag::U128, Address => TypeTag::Address, Signer => TypeTag::Signer, Vector(t) => TypeTag::Vector(Box::new( t.into_type_tag() .expect("Invariant violation: vector type argument contains reference"), )), Struct { address, module, name, type_arguments, } => TypeTag::Struct(StructTag { address, module, name, type_params: type_arguments .into_iter() .map(|t| { t.into_type_tag().expect( "Invariant violation: struct type argument contains reference", ) }) .collect(), }), TypeParameter(_) => unreachable!(), } } else { return None; }) } } impl Field { /// Create a `Field` for `FieldDefinition` `f` in module `m`. pub fn new(m: &CompiledModule, f: &FieldDefinition) -> Self { Field { name: m.identifier_at(f.name).to_owned(), type_: Type::new(m, &f.signature.0), } } } impl Struct { /// Create a `Struct` for `StructDefinition` `def` in module `m`. Panics if `def` is a /// a native struct definition. pub fn new(m: &CompiledModule, def: &StructDefinition) -> Self { let handle = m.struct_handle_at(def.struct_handle); let fields = match &def.field_information { StructFieldInformation::Native => panic!("Can't extract for native struct"), StructFieldInformation::Declared(fields) => { fields.iter().map(|f| Field::new(m, f)).collect() } }; Struct { name: m.identifier_at(handle.name).to_owned(), abilities: handle.abilities, type_parameters: handle.type_parameters.clone(), fields, } } } impl FunctionSignature { /// Create a `FunctionSignature` for `FunctionHandle` `f` in module `m`. 
pub fn new(m: &CompiledModule, f: &FunctionHandle) -> Self { FunctionSignature { name: m.identifier_at(f.name).to_owned(), type_parameters: f.type_parameters.clone(), formals: m .signature_at(f.parameters) .0 .iter() .map(|s| Type::new(m, s)) .collect(), ret: m .signature_at(f.return_) .0 .iter() .map(|s| Type::new(m, s)) .collect(), } } }
{ ModuleId::new(self.address, self.name.clone()) }
identifier_body
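`is_closed` and `into_type_tag` above hinge on one idea: only types with no free type parameters (and no references) can be turned into a runtime tag. The recursion is easy to show on stripped-down stand-ins; `Ty` and `Tag` below are invented for this sketch and are not the move-binary-format or move-core-types enums.

// `Ty` and `Tag` are stripped-down, invented stand-ins for the normalized
// `Type` and the runtime `TypeTag`; only the cases needed for the recursion.
enum Ty {
    Bool,
    U64,
    Vector(Box<Ty>),
    TypeParameter(u16),
    Reference(Box<Ty>),
}

#[derive(Debug, PartialEq)]
enum Tag {
    Bool,
    U64,
    Vector(Box<Tag>),
}

impl Ty {
    // A type is "closed" when it mentions no type parameters.
    fn is_closed(&self) -> bool {
        match self {
            Ty::TypeParameter(_) => false,
            Ty::Bool | Ty::U64 => true,
            Ty::Vector(t) | Ty::Reference(t) => t.is_closed(),
        }
    }

    // Only closed, non-reference types have a runtime tag.
    fn into_tag(self) -> Option<Tag> {
        if !self.is_closed() {
            return None;
        }
        match self {
            Ty::Reference(_) => None,
            Ty::Bool => Some(Tag::Bool),
            Ty::U64 => Some(Tag::U64),
            Ty::Vector(t) => (*t).into_tag().map(|inner| Tag::Vector(Box::new(inner))),
            Ty::TypeParameter(_) => unreachable!("is_closed ruled this out"),
        }
    }
}

fn main() {
    assert_eq!(
        Ty::Vector(Box::new(Ty::U64)).into_tag(),
        Some(Tag::Vector(Box::new(Tag::U64)))
    );
    assert_eq!(Ty::Vector(Box::new(Ty::TypeParameter(0))).into_tag(), None);
    assert_eq!(Ty::Reference(Box::new(Ty::Bool)).into_tag(), None);
}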
shared_lock.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Different objects protected by the same lock #[cfg(feature = "gecko")] use atomic_refcell::{AtomicRefCell, AtomicRef, AtomicRefMut}; #[cfg(feature = "servo")] use parking_lot::RwLock; use std::cell::UnsafeCell; use std::fmt; use std::sync::Arc; /// A shared read/write lock that can protect multiple objects. /// /// In Gecko builds, we don't need the blocking behavior, just the safety. As /// such we implement this with an AtomicRefCell instead in Gecko builds, /// which is ~2x as fast, and panics (rather than deadlocking) when things go /// wrong (which is much easier to debug on CI). /// /// Servo needs the blocking behavior for its unsynchronized animation setup, /// but that may not be web-compatible and may need to be changed (at which /// point Servo could use AtomicRefCell too). #[derive(Clone)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct SharedRwLock { #[cfg(feature = "servo")] #[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")] arc: Arc<RwLock<()>>, #[cfg(feature = "gecko")] cell: Arc<AtomicRefCell<SomethingZeroSizedButTyped>>, } #[cfg(feature = "gecko")] struct SomethingZeroSizedButTyped; impl fmt::Debug for SharedRwLock { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("SharedRwLock") } } impl SharedRwLock { /// Create a new shared lock (servo). #[cfg(feature = "servo")] pub fn new() -> Self { SharedRwLock { arc: Arc::new(RwLock::new(())) } } /// Create a new shared lock (gecko). #[cfg(feature = "gecko")] pub fn new() -> Self { SharedRwLock { cell: Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped)) } } /// Wrap the given data to make its access protected by this lock. pub fn wrap<T>(&self, data: T) -> Locked<T> { Locked { shared_lock: self.clone(), data: UnsafeCell::new(data), } } /// Obtain the lock for reading (servo). #[cfg(feature = "servo")] pub fn read(&self) -> SharedRwLockReadGuard { self.arc.raw_read(); SharedRwLockReadGuard(self) } /// Obtain the lock for reading (gecko). #[cfg(feature = "gecko")] pub fn read(&self) -> SharedRwLockReadGuard { SharedRwLockReadGuard(self.cell.borrow()) } /// Obtain the lock for writing (servo). #[cfg(feature = "servo")] pub fn write(&self) -> SharedRwLockWriteGuard { self.arc.raw_write(); SharedRwLockWriteGuard(self) } /// Obtain the lock for writing (gecko). #[cfg(feature = "gecko")] pub fn
(&self) -> SharedRwLockWriteGuard { SharedRwLockWriteGuard(self.cell.borrow_mut()) } } /// Proof that a shared lock was obtained for reading (servo). #[cfg(feature = "servo")] pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock); /// Proof that a shared lock was obtained for writing (gecko). #[cfg(feature = "gecko")] pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>); #[cfg(feature = "servo")] impl<'a> Drop for SharedRwLockReadGuard<'a> { fn drop(&mut self) { // Unsafe: self.lock is private to this module, only ever set after `raw_read()`, // and never copied or cloned (see `compile_time_assert` below). unsafe { self.0.arc.raw_unlock_read() } } } /// Proof that a shared lock was obtained for writing (servo). #[cfg(feature = "servo")] pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock); /// Proof that a shared lock was obtained for writing (gecko). #[cfg(feature = "gecko")] pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>); #[cfg(feature = "servo")] impl<'a> Drop for SharedRwLockWriteGuard<'a> { fn drop(&mut self) { // Unsafe: self.lock is private to this module, only ever set after `raw_write()`, // and never copied or cloned (see `compile_time_assert` below). unsafe { self.0.arc.raw_unlock_write() } } } /// Data protect by a shared lock. pub struct Locked<T> { shared_lock: SharedRwLock, data: UnsafeCell<T>, } // Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`, // where guards ensure synchronization. unsafe impl<T: Send> Send for Locked<T> {} unsafe impl<T: Send + Sync> Sync for Locked<T> {} impl<T: fmt::Debug> fmt::Debug for Locked<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let guard = self.shared_lock.read(); self.read_with(&guard).fmt(f) } } impl<T> Locked<T> { #[cfg(feature = "servo")] fn same_lock_as(&self, lock: &SharedRwLock) -> bool { ::arc_ptr_eq(&self.shared_lock.arc, &lock.arc) } #[cfg(feature = "gecko")] fn same_lock_as(&self, derefed_guard: &SomethingZeroSizedButTyped) -> bool { ::ptr_eq(self.shared_lock.cell.as_ptr(), derefed_guard) } /// Access the data for reading. pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T { assert!(self.same_lock_as(&guard.0), "Locked::read_with called with a guard from an unrelated SharedRwLock"); let ptr = self.data.get(); // Unsafe: // // * The guard guarantees that the lock is taken for reading, // and we’ve checked that it’s the correct lock. // * The returned reference borrows *both* the data and the guard, // so that it can outlive neither. unsafe { &*ptr } } /// Access the data for writing. pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T { assert!(self.same_lock_as(&guard.0), "Locked::write_with called with a guard from an unrelated SharedRwLock"); let ptr = self.data.get(); // Unsafe: // // * The guard guarantees that the lock is taken for writing, // and we’ve checked that it’s the correct lock. // * The returned reference borrows *both* the data and the guard, // so that it can outlive neither. // * We require a mutable borrow of the guard, // so that one write guard can only be used once at a time. 
unsafe { &mut *ptr } } } #[allow(dead_code)] mod compile_time_assert { use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard}; trait Marker1 {} impl<T: Clone> Marker1 for T {} impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone trait Marker2 {} impl<T: Copy> Marker2 for T {} impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy } /// Like ToCss, but with a lock guard given by the caller. pub trait ToCssWithGuard { /// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard. fn to_css<W>(&self, guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result where W: fmt::Write; /// Serialize `self` in CSS syntax using the given lock guard and return a string. /// /// (This is a convenience wrapper for `to_css` and probably should not be overridden.) #[inline] fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> String { let mut s = String::new(); self.to_css(guard, &mut s).unwrap(); s } } /// Guards for a document #[derive(Clone)] pub struct StylesheetGuards<'a> { /// For author-origin stylesheets pub author: &'a SharedRwLockReadGuard<'a>, /// For user-agent-origin and user-origin stylesheets pub ua_or_user: &'a SharedRwLockReadGuard<'a>, } impl<'a> StylesheetGuards<'a> { /// Same guard for all origins pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self { StylesheetGuards { author: guard, ua_or_user: guard, } } }
write
identifier_name
shared_lock.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Different objects protected by the same lock #[cfg(feature = "gecko")] use atomic_refcell::{AtomicRefCell, AtomicRef, AtomicRefMut}; #[cfg(feature = "servo")] use parking_lot::RwLock; use std::cell::UnsafeCell; use std::fmt; use std::sync::Arc; /// A shared read/write lock that can protect multiple objects. /// /// In Gecko builds, we don't need the blocking behavior, just the safety. As /// such we implement this with an AtomicRefCell instead in Gecko builds, /// which is ~2x as fast, and panics (rather than deadlocking) when things go /// wrong (which is much easier to debug on CI). /// /// Servo needs the blocking behavior for its unsynchronized animation setup, /// but that may not be web-compatible and may need to be changed (at which /// point Servo could use AtomicRefCell too). #[derive(Clone)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct SharedRwLock { #[cfg(feature = "servo")] #[cfg_attr(feature = "servo", ignore_heap_size_of = "Arc")] arc: Arc<RwLock<()>>, #[cfg(feature = "gecko")] cell: Arc<AtomicRefCell<SomethingZeroSizedButTyped>>, } #[cfg(feature = "gecko")] struct SomethingZeroSizedButTyped; impl fmt::Debug for SharedRwLock { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("SharedRwLock") } } impl SharedRwLock { /// Create a new shared lock (servo). #[cfg(feature = "servo")] pub fn new() -> Self { SharedRwLock { arc: Arc::new(RwLock::new(())) } } /// Create a new shared lock (gecko). #[cfg(feature = "gecko")] pub fn new() -> Self { SharedRwLock { cell: Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped)) } } /// Wrap the given data to make its access protected by this lock. pub fn wrap<T>(&self, data: T) -> Locked<T> { Locked { shared_lock: self.clone(), data: UnsafeCell::new(data), } } /// Obtain the lock for reading (servo). #[cfg(feature = "servo")] pub fn read(&self) -> SharedRwLockReadGuard { self.arc.raw_read(); SharedRwLockReadGuard(self) } /// Obtain the lock for reading (gecko). #[cfg(feature = "gecko")] pub fn read(&self) -> SharedRwLockReadGuard { SharedRwLockReadGuard(self.cell.borrow()) } /// Obtain the lock for writing (servo). #[cfg(feature = "servo")] pub fn write(&self) -> SharedRwLockWriteGuard { self.arc.raw_write(); SharedRwLockWriteGuard(self) } /// Obtain the lock for writing (gecko). #[cfg(feature = "gecko")] pub fn write(&self) -> SharedRwLockWriteGuard { SharedRwLockWriteGuard(self.cell.borrow_mut()) } } /// Proof that a shared lock was obtained for reading (servo). #[cfg(feature = "servo")] pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock); /// Proof that a shared lock was obtained for writing (gecko). #[cfg(feature = "gecko")] pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>); #[cfg(feature = "servo")] impl<'a> Drop for SharedRwLockReadGuard<'a> { fn drop(&mut self) { // Unsafe: self.lock is private to this module, only ever set after `raw_read()`, // and never copied or cloned (see `compile_time_assert` below). unsafe { self.0.arc.raw_unlock_read() } } } /// Proof that a shared lock was obtained for writing (servo). #[cfg(feature = "servo")] pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock); /// Proof that a shared lock was obtained for writing (gecko). 
#[cfg(feature = "gecko")] pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>); #[cfg(feature = "servo")] impl<'a> Drop for SharedRwLockWriteGuard<'a> { fn drop(&mut self) { // Unsafe: self.lock is private to this module, only ever set after `raw_write()`, // and never copied or cloned (see `compile_time_assert` below). unsafe { self.0.arc.raw_unlock_write() } } } /// Data protect by a shared lock. pub struct Locked<T> { shared_lock: SharedRwLock, data: UnsafeCell<T>, } // Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`, // where guards ensure synchronization.
unsafe impl<T: Send + Sync> Sync for Locked<T> {} impl<T: fmt::Debug> fmt::Debug for Locked<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let guard = self.shared_lock.read(); self.read_with(&guard).fmt(f) } } impl<T> Locked<T> { #[cfg(feature = "servo")] fn same_lock_as(&self, lock: &SharedRwLock) -> bool { ::arc_ptr_eq(&self.shared_lock.arc, &lock.arc) } #[cfg(feature = "gecko")] fn same_lock_as(&self, derefed_guard: &SomethingZeroSizedButTyped) -> bool { ::ptr_eq(self.shared_lock.cell.as_ptr(), derefed_guard) } /// Access the data for reading. pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T { assert!(self.same_lock_as(&guard.0), "Locked::read_with called with a guard from an unrelated SharedRwLock"); let ptr = self.data.get(); // Unsafe: // // * The guard guarantees that the lock is taken for reading, // and we’ve checked that it’s the correct lock. // * The returned reference borrows *both* the data and the guard, // so that it can outlive neither. unsafe { &*ptr } } /// Access the data for writing. pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T { assert!(self.same_lock_as(&guard.0), "Locked::write_with called with a guard from an unrelated SharedRwLock"); let ptr = self.data.get(); // Unsafe: // // * The guard guarantees that the lock is taken for writing, // and we’ve checked that it’s the correct lock. // * The returned reference borrows *both* the data and the guard, // so that it can outlive neither. // * We require a mutable borrow of the guard, // so that one write guard can only be used once at a time. unsafe { &mut *ptr } } } #[allow(dead_code)] mod compile_time_assert { use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard}; trait Marker1 {} impl<T: Clone> Marker1 for T {} impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone trait Marker2 {} impl<T: Copy> Marker2 for T {} impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy } /// Like ToCss, but with a lock guard given by the caller. pub trait ToCssWithGuard { /// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard. fn to_css<W>(&self, guard: &SharedRwLockReadGuard, dest: &mut W) -> fmt::Result where W: fmt::Write; /// Serialize `self` in CSS syntax using the given lock guard and return a string. /// /// (This is a convenience wrapper for `to_css` and probably should not be overridden.) #[inline] fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> String { let mut s = String::new(); self.to_css(guard, &mut s).unwrap(); s } } /// Guards for a document #[derive(Clone)] pub struct StylesheetGuards<'a> { /// For author-origin stylesheets pub author: &'a SharedRwLockReadGuard<'a>, /// For user-agent-origin and user-origin stylesheets pub ua_or_user: &'a SharedRwLockReadGuard<'a>, } impl<'a> StylesheetGuards<'a> { /// Same guard for all origins pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self { StylesheetGuards { author: guard, ua_or_user: guard, } } }
unsafe impl<T: Send> Send for Locked<T> {}
random_line_split
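The point of `Locked<T>` above is that a guard acts as proof that one specific lock is held, enforced by a pointer-identity assertion before data is handed out. Below is a safe, std-only toy version of that check; a `Mutex` stands in for the raw lock plumbing and the data is read-only, so this is not the real Servo API, just the same-lock assertion in isolation.

use std::sync::{Arc, Mutex, MutexGuard};

// Toy lock whose identity is its Arc allocation (a stand-in for SharedRwLock).
#[derive(Clone)]
struct SharedLock(Arc<Mutex<()>>);

// Proof that `SharedLock::lock` succeeded; it remembers which lock it came from.
struct Guard<'a> {
    owner: &'a SharedLock,
    _held: MutexGuard<'a, ()>,
}

impl SharedLock {
    fn new() -> Self {
        SharedLock(Arc::new(Mutex::new(())))
    }

    fn lock(&self) -> Guard<'_> {
        Guard { owner: self, _held: self.0.lock().unwrap() }
    }
}

// Data tied to one particular lock; reading demands a guard from *that* lock.
struct Locked<T> {
    lock: SharedLock,
    data: T,
}

impl<T> Locked<T> {
    fn read_with<'a>(&'a self, guard: &'a Guard<'_>) -> &'a T {
        assert!(
            Arc::ptr_eq(&self.lock.0, &guard.owner.0),
            "guard comes from an unrelated lock"
        );
        &self.data
    }
}

fn main() {
    let lock = SharedLock::new();
    let value = Locked { lock: lock.clone(), data: 42u32 };
    let guard = lock.lock();
    assert_eq!(*value.read_with(&guard), 42);
}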
commands.rs
/* * Copyright (c) 2016-2018 Boucher, Antoni <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #[derive(Commands)] pub enum AppCommand { #[completion(hidden)] ActivateSelection, #[help(text="Update the host file used by the adblocker")] AdblockUpdate, #[help(text="Add a new user agent")] AddUserAgent(String), #[help(text="Go back in the history")] Back, #[special_command(incremental, identifier="?")] BackwardSearch(String), #[help(text="Add the current page to the bookmarks")] Bookmark, #[help(text="Delete the current page from the bookmarks")] BookmarkDel, #[help(text="Edit the bookmark tags of the current page")] BookmarkEditTags, #[help(text="Clear the browser cache")] ClearCache, #[help(text="Try to click link to next page if it exists")] ClickNextPage, #[help(text="Try to click link to the previous page if it exists")] ClickPrevPage, #[completion(hidden)] CopyLinkUrl, #[completion(hidden)] CopyUrl, #[help(text="Delete all the cookies")] DeleteAllCookies, #[help(text="Delete the cookies for the specified domain")] DeleteCookies(String), #[completion(hidden)] DeleteSelectedBookmark, #[completion(hidden)] FinishSearch, #[completion(hidden)] FocusInput, #[completion(hidden)] Follow, #[help(text="Go forward in the history")] Forward, #[completion(hidden)] GoMark(String), #[count] #[help(text="Go up one directory in url")] GoParentDir(Option<u32>), #[help(text="Go to root directory of url")] GoRootDir, #[completion(hidden)] HideHints, #[completion(hidden)] Hover, #[completion(hidden)] Insert, #[help(text="Open the web inspector")] Inspector, #[help(text="Kill the webview without confirmation")] KillWin, #[completion(hidden)] Mark(String), #[completion(hidden)] Normal, #[help(text="Open an URL")] Open(String), #[help(text="Delete the credentials for the current URL")] PasswordDelete, #[help(text="Insert a password in the focused text input")] PasswordInsert, #[help(text="Insert a password in the focused text input and submit the form")] PasswordInsertSubmit, #[help(text="Load the credentials in the login form")] PasswordLoad, #[help(text="Save the credentials from the login form")] PasswordSave, #[help(text="Load the credentials in the login form and submit the form")] PasswordSubmit, #[completion(hidden)] PasteUrl, #[help(text="Print the current page")] Print, #[help(text="Open an URL in a new private window")] PrivateWinOpen(String), #[help(text="Quit the application")] Quit, #[help(text="Reload the current page")] Reload, #[help(text="Reload the current page without using the 
cache")] ReloadBypassCache, #[help(text="Restore the opened pages after a crash")] RestoreUrls, #[completion(hidden)] SaveLink, #[completion(hidden)] SearchEngine(String), #[completion(hidden)] Screenshot(String), #[count] #[completion(hidden)] ScrollTo(Option<u32>), #[completion(hidden)] ScrollDown, #[completion(hidden)] ScrollDownHalf, #[completion(hidden)] ScrollDownLine, #[completion(hidden)] ScrollLeft, #[completion(hidden)] ScrollRight, #[completion(hidden)] ScrollTop, #[completion(hidden)] ScrollUp, #[completion(hidden)] ScrollUpHalf, #[completion(hidden)] ScrollUpLine, #[special_command(incremental, identifier="/")] Search(String), #[completion(hidden)] SearchNext, #[completion(hidden)] SearchPrevious, #[help(text="Select a user agent by name")] SelectUserAgent(String),
#[completion(hidden)] UrlIncrement, #[completion(hidden)] UrlDecrement, #[completion(hidden)] WinFollow, #[help(text="Open an URL in a new window")] WinOpen(String), #[completion(hidden)] WinPasteUrl, #[help(text="Zoom the current page in")] ZoomIn, #[help(text="Zoom the current page to 100%")] ZoomNormal, #[help(text="Zoom the current page out")] ZoomOut, }
#[help(text="Stop loading the current page")] Stop,
random_line_split
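The record above stores the three pieces of a fill-in-the-middle split — the text before the cut, the removed span, and the text after it — plus a label such as `random_line_split` describing how the cut was chosen. A minimal sketch of how such a record can be reassembled and sanity-checked, assuming the three pieces are available as plain strings; the short literals below are illustrative stand-ins, not copied from the record:

```rust
/// Rebuild the original source from a fill-in-the-middle record:
/// the removed span slots back between the prefix and the suffix.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

fn main() {
    // Toy stand-in for a random line split inside an enum body.
    let prefix = "pub enum AppCommand {\n    Back,\n";
    let middle = "    Stop,\n";
    let suffix = "    ZoomIn,\n}\n";
    assert_eq!(
        reassemble(prefix, middle, suffix),
        "pub enum AppCommand {\n    Back,\n    Stop,\n    ZoomIn,\n}\n"
    );
}
```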
commands.rs
/* * Copyright (c) 2016-2018 Boucher, Antoni <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #[derive(Commands)] pub enum
{ #[completion(hidden)] ActivateSelection, #[help(text="Update the host file used by the adblocker")] AdblockUpdate, #[help(text="Add a new user agent")] AddUserAgent(String), #[help(text="Go back in the history")] Back, #[special_command(incremental, identifier="?")] BackwardSearch(String), #[help(text="Add the current page to the bookmarks")] Bookmark, #[help(text="Delete the current page from the bookmarks")] BookmarkDel, #[help(text="Edit the bookmark tags of the current page")] BookmarkEditTags, #[help(text="Clear the browser cache")] ClearCache, #[help(text="Try to click link to next page if it exists")] ClickNextPage, #[help(text="Try to click link to the previous page if it exists")] ClickPrevPage, #[completion(hidden)] CopyLinkUrl, #[completion(hidden)] CopyUrl, #[help(text="Delete all the cookies")] DeleteAllCookies, #[help(text="Delete the cookies for the specified domain")] DeleteCookies(String), #[completion(hidden)] DeleteSelectedBookmark, #[completion(hidden)] FinishSearch, #[completion(hidden)] FocusInput, #[completion(hidden)] Follow, #[help(text="Go forward in the history")] Forward, #[completion(hidden)] GoMark(String), #[count] #[help(text="Go up one directory in url")] GoParentDir(Option<u32>), #[help(text="Go to root directory of url")] GoRootDir, #[completion(hidden)] HideHints, #[completion(hidden)] Hover, #[completion(hidden)] Insert, #[help(text="Open the web inspector")] Inspector, #[help(text="Kill the webview without confirmation")] KillWin, #[completion(hidden)] Mark(String), #[completion(hidden)] Normal, #[help(text="Open an URL")] Open(String), #[help(text="Delete the credentials for the current URL")] PasswordDelete, #[help(text="Insert a password in the focused text input")] PasswordInsert, #[help(text="Insert a password in the focused text input and submit the form")] PasswordInsertSubmit, #[help(text="Load the credentials in the login form")] PasswordLoad, #[help(text="Save the credentials from the login form")] PasswordSave, #[help(text="Load the credentials in the login form and submit the form")] PasswordSubmit, #[completion(hidden)] PasteUrl, #[help(text="Print the current page")] Print, #[help(text="Open an URL in a new private window")] PrivateWinOpen(String), #[help(text="Quit the application")] Quit, #[help(text="Reload the current page")] Reload, #[help(text="Reload the current page without using the cache")] ReloadBypassCache, #[help(text="Restore the opened pages after a crash")] RestoreUrls, #[completion(hidden)] SaveLink, #[completion(hidden)] SearchEngine(String), #[completion(hidden)] Screenshot(String), #[count] #[completion(hidden)] ScrollTo(Option<u32>), #[completion(hidden)] ScrollDown, #[completion(hidden)] ScrollDownHalf, #[completion(hidden)] ScrollDownLine, #[completion(hidden)] ScrollLeft, #[completion(hidden)] ScrollRight, #[completion(hidden)] ScrollTop, #[completion(hidden)] ScrollUp, #[completion(hidden)] ScrollUpHalf, #[completion(hidden)] ScrollUpLine, #[special_command(incremental, identifier="/")] Search(String), #[completion(hidden)] SearchNext, #[completion(hidden)] SearchPrevious, #[help(text="Select a user agent by name")] SelectUserAgent(String), #[help(text="Stop loading the current page")] Stop, #[completion(hidden)] UrlIncrement, #[completion(hidden)] UrlDecrement, #[completion(hidden)] WinFollow, #[help(text="Open an URL in a new window")] WinOpen(String), #[completion(hidden)] WinPasteUrl, #[help(text="Zoom the current page in")] ZoomIn, #[help(text="Zoom the current page to 100%")] ZoomNormal, #[help(text="Zoom 
the current page out")] ZoomOut, }
AppCommand
identifier_name
incremental_get.rs
use test::{black_box, Bencher}; use engine_rocks::RocksSnapshot; use kvproto::kvrpcpb::{Context, IsolationLevel}; use std::sync::Arc; use test_storage::SyncTestStorageBuilder; use tidb_query_datatype::codec::table; use tikv::storage::{Engine, SnapshotStore, Statistics, Store}; use txn_types::{Key, Mutation}; fn table_lookup_gen_data() -> (SnapshotStore<Arc<RocksSnapshot>>, Vec<Key>) { let store = SyncTestStorageBuilder::new().build().unwrap(); let mut mutations = Vec::new(); let mut keys = Vec::new(); for i in 0..30000 { let user_key = table::encode_row_key(5, i);
let mutation = Mutation::Put((key.clone(), user_value)); mutations.push(mutation); keys.push(key); } let pk = table::encode_row_key(5, 0); store .prewrite(Context::default(), mutations, pk, 1) .unwrap(); store.commit(Context::default(), keys, 1, 2).unwrap(); let engine = store.get_engine(); let db = engine.get_rocksdb().get_sync_db(); db.compact_range_cf(db.cf_handle("write").unwrap(), None, None); db.compact_range_cf(db.cf_handle("default").unwrap(), None, None); db.compact_range_cf(db.cf_handle("lock").unwrap(), None, None); let snapshot = engine.snapshot(Default::default()).unwrap(); let store = SnapshotStore::new( snapshot, 10.into(), IsolationLevel::Si, true, Default::default(), false, ); // Keys are given in order, and are far away from each other to simulate a normal table lookup // scenario. let mut get_keys = Vec::new(); for i in (0..30000).step_by(30) { get_keys.push(Key::from_raw(&table::encode_row_key(5, i))); } (store, get_keys) } #[bench] fn bench_table_lookup_mvcc_get(b: &mut Bencher) { let (store, keys) = table_lookup_gen_data(); b.iter(|| { let mut stats = Statistics::default(); for key in &keys { black_box(store.get(key, &mut stats).unwrap()); } }); } #[bench] fn bench_table_lookup_mvcc_incremental_get(b: &mut Bencher) { let (mut store, keys) = table_lookup_gen_data(); b.iter(|| { for key in &keys { black_box(store.incremental_get(key).unwrap()); } }) }
let user_value = vec![b'x'; 60]; let key = Key::from_raw(&user_key);
random_line_split
incremental_get.rs
use test::{black_box, Bencher}; use engine_rocks::RocksSnapshot; use kvproto::kvrpcpb::{Context, IsolationLevel}; use std::sync::Arc; use test_storage::SyncTestStorageBuilder; use tidb_query_datatype::codec::table; use tikv::storage::{Engine, SnapshotStore, Statistics, Store}; use txn_types::{Key, Mutation}; fn table_lookup_gen_data() -> (SnapshotStore<Arc<RocksSnapshot>>, Vec<Key>) { let store = SyncTestStorageBuilder::new().build().unwrap(); let mut mutations = Vec::new(); let mut keys = Vec::new(); for i in 0..30000 { let user_key = table::encode_row_key(5, i); let user_value = vec![b'x'; 60]; let key = Key::from_raw(&user_key); let mutation = Mutation::Put((key.clone(), user_value)); mutations.push(mutation); keys.push(key); } let pk = table::encode_row_key(5, 0); store .prewrite(Context::default(), mutations, pk, 1) .unwrap(); store.commit(Context::default(), keys, 1, 2).unwrap(); let engine = store.get_engine(); let db = engine.get_rocksdb().get_sync_db(); db.compact_range_cf(db.cf_handle("write").unwrap(), None, None); db.compact_range_cf(db.cf_handle("default").unwrap(), None, None); db.compact_range_cf(db.cf_handle("lock").unwrap(), None, None); let snapshot = engine.snapshot(Default::default()).unwrap(); let store = SnapshotStore::new( snapshot, 10.into(), IsolationLevel::Si, true, Default::default(), false, ); // Keys are given in order, and are far away from each other to simulate a normal table lookup // scenario. let mut get_keys = Vec::new(); for i in (0..30000).step_by(30) { get_keys.push(Key::from_raw(&table::encode_row_key(5, i))); } (store, get_keys) } #[bench] fn bench_table_lookup_mvcc_get(b: &mut Bencher)
#[bench] fn bench_table_lookup_mvcc_incremental_get(b: &mut Bencher) { let (mut store, keys) = table_lookup_gen_data(); b.iter(|| { for key in &keys { black_box(store.incremental_get(key).unwrap()); } }) }
{ let (store, keys) = table_lookup_gen_data(); b.iter(|| { let mut stats = Statistics::default(); for key in &keys { black_box(store.get(key, &mut stats).unwrap()); } }); }
identifier_body
incremental_get.rs
use test::{black_box, Bencher}; use engine_rocks::RocksSnapshot; use kvproto::kvrpcpb::{Context, IsolationLevel}; use std::sync::Arc; use test_storage::SyncTestStorageBuilder; use tidb_query_datatype::codec::table; use tikv::storage::{Engine, SnapshotStore, Statistics, Store}; use txn_types::{Key, Mutation}; fn table_lookup_gen_data() -> (SnapshotStore<Arc<RocksSnapshot>>, Vec<Key>) { let store = SyncTestStorageBuilder::new().build().unwrap(); let mut mutations = Vec::new(); let mut keys = Vec::new(); for i in 0..30000 { let user_key = table::encode_row_key(5, i); let user_value = vec![b'x'; 60]; let key = Key::from_raw(&user_key); let mutation = Mutation::Put((key.clone(), user_value)); mutations.push(mutation); keys.push(key); } let pk = table::encode_row_key(5, 0); store .prewrite(Context::default(), mutations, pk, 1) .unwrap(); store.commit(Context::default(), keys, 1, 2).unwrap(); let engine = store.get_engine(); let db = engine.get_rocksdb().get_sync_db(); db.compact_range_cf(db.cf_handle("write").unwrap(), None, None); db.compact_range_cf(db.cf_handle("default").unwrap(), None, None); db.compact_range_cf(db.cf_handle("lock").unwrap(), None, None); let snapshot = engine.snapshot(Default::default()).unwrap(); let store = SnapshotStore::new( snapshot, 10.into(), IsolationLevel::Si, true, Default::default(), false, ); // Keys are given in order, and are far away from each other to simulate a normal table lookup // scenario. let mut get_keys = Vec::new(); for i in (0..30000).step_by(30) { get_keys.push(Key::from_raw(&table::encode_row_key(5, i))); } (store, get_keys) } #[bench] fn
(b: &mut Bencher) { let (store, keys) = table_lookup_gen_data(); b.iter(|| { let mut stats = Statistics::default(); for key in &keys { black_box(store.get(key, &mut stats).unwrap()); } }); } #[bench] fn bench_table_lookup_mvcc_incremental_get(b: &mut Bencher) { let (mut store, keys) = table_lookup_gen_data(); b.iter(|| { for key in &keys { black_box(store.incremental_get(key).unwrap()); } }) }
bench_table_lookup_mvcc_get
identifier_name
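Both benchmarks in the incremental_get.rs records wrap the measured call in `black_box` so the optimizer cannot discard the work being timed. The records omit the crate-root boilerplate that `#[bench]` relies on; a self-contained sketch of that harness on nightly Rust (compiled with the test harness, e.g. `rustc --test`), with a purely illustrative summing workload:

```rust
#![feature(test)]
extern crate test;

use test::{black_box, Bencher};

#[bench]
fn bench_sum(b: &mut Bencher) {
    let data: Vec<u64> = (0..1_000).collect();
    b.iter(|| {
        // black_box keeps the result "observable" so the sum is not optimized away.
        black_box(data.iter().sum::<u64>());
    });
}
```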
buffer_map.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; use geom::size::Size2D; use layers::platform::surface::NativePaintingGraphicsContext; use layers::layers::LayerBuffer; use std::hash::{Hash, Hasher, Writer}; use std::mem; /// This is a struct used to store buffers when they are not in use. /// The paint task can quickly query for a particular size of buffer when it /// needs it. pub struct BufferMap { /// A HashMap that stores the Buffers. map: HashMap<BufferKey, BufferValue>, /// The current amount of memory stored by the BufferMap's buffers. mem: uint, /// The maximum allowed memory. Unused buffers will be deleted /// when this threshold is exceeded. max_mem: uint, /// A monotonically increasing counter to track how recently tile sizes were used. counter: uint, } /// A key with which to store buffers. It is based on the size of the buffer. #[derive(Eq, Copy)] struct BufferKey([uint; 2]); impl<H: Hasher+Writer> Hash<H> for BufferKey { fn hash(&self, state: &mut H)
} impl PartialEq for BufferKey { fn eq(&self, other: &BufferKey) -> bool { let BufferKey(s) = *self; let BufferKey(o) = *other; s[0] == o[0] && s[1] == o[1] } } /// Create a key from a given size impl BufferKey { fn get(input: Size2D<uint>) -> BufferKey { BufferKey([input.width, input.height]) } } /// A helper struct to keep track of buffers in the HashMap struct BufferValue { /// An array of buffers, all the same size buffers: Vec<Box<LayerBuffer>>, /// The counter when this size was last requested last_action: uint, } impl BufferMap { // Creates a new BufferMap with a given buffer limit. pub fn new(max_mem: uint) -> BufferMap { BufferMap { map: HashMap::new(), mem: 0u, max_mem: max_mem, counter: 0u, } } /// Insert a new buffer into the map. pub fn insert(&mut self, graphics_context: &NativePaintingGraphicsContext, new_buffer: Box<LayerBuffer>) { let new_key = BufferKey::get(new_buffer.get_size_2d()); // If all our buffers are the same size and we're already at our // memory limit, no need to store this new buffer; just let it drop. if self.mem + new_buffer.get_mem() > self.max_mem && self.map.len() == 1 && self.map.contains_key(&new_key) { new_buffer.destroy(graphics_context); return; } self.mem += new_buffer.get_mem(); // use lazy insertion function to prevent unnecessary allocation let counter = &self.counter; match self.map.entry(new_key) { Occupied(entry) => { entry.into_mut().buffers.push(new_buffer); } Vacant(entry) => { entry.insert(BufferValue { buffers: vec!(new_buffer), last_action: *counter, }); } } let mut opt_key: Option<BufferKey> = None; while self.mem > self.max_mem { let old_key = match opt_key { Some(key) => key, None => { match self.map.iter().min_by(|&(_, x)| x.last_action) { Some((k, _)) => *k, None => panic!("BufferMap: tried to delete with no elements in map"), } } }; if { let list = &mut self.map[old_key].buffers; let condemned_buffer = list.pop().take().unwrap(); self.mem -= condemned_buffer.get_mem(); condemned_buffer.destroy(graphics_context); list.is_empty() } { // then self.map.remove(&old_key); // Don't store empty vectors! opt_key = None; } else { opt_key = Some(old_key); } } } // Try to find a buffer for the given size. pub fn find(&mut self, size: Size2D<uint>) -> Option<Box<LayerBuffer>> { let mut flag = false; // True if key needs to be popped after retrieval. let key = BufferKey::get(size); let ret = match self.map.get_mut(&key) { Some(ref mut buffer_val) => { buffer_val.last_action = self.counter; self.counter += 1; let buffer = buffer_val.buffers.pop().take().unwrap(); self.mem -= buffer.get_mem(); if buffer_val.buffers.is_empty() { flag = true; } Some(buffer) } None => None, }; if flag { self.map.remove(&key); // Don't store empty vectors! } ret } /// Destroys all buffers. pub fn clear(&mut self, graphics_context: &NativePaintingGraphicsContext) { let map = mem::replace(&mut self.map, HashMap::new()); for (_, value) in map.into_iter() { for tile in value.buffers.into_iter() { tile.destroy(graphics_context) } } self.mem = 0 } }
{ let BufferKey(ref bytes) = *self; bytes.as_slice().hash(state); }
identifier_body
buffer_map.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; use geom::size::Size2D; use layers::platform::surface::NativePaintingGraphicsContext; use layers::layers::LayerBuffer; use std::hash::{Hash, Hasher, Writer}; use std::mem; /// This is a struct used to store buffers when they are not in use. /// The paint task can quickly query for a particular size of buffer when it /// needs it. pub struct BufferMap { /// A HashMap that stores the Buffers. map: HashMap<BufferKey, BufferValue>, /// The current amount of memory stored by the BufferMap's buffers. mem: uint, /// The maximum allowed memory. Unused buffers will be deleted /// when this threshold is exceeded. max_mem: uint, /// A monotonically increasing counter to track how recently tile sizes were used. counter: uint, } /// A key with which to store buffers. It is based on the size of the buffer. #[derive(Eq, Copy)] struct BufferKey([uint; 2]); impl<H: Hasher+Writer> Hash<H> for BufferKey { fn hash(&self, state: &mut H) { let BufferKey(ref bytes) = *self; bytes.as_slice().hash(state); } } impl PartialEq for BufferKey { fn eq(&self, other: &BufferKey) -> bool { let BufferKey(s) = *self; let BufferKey(o) = *other; s[0] == o[0] && s[1] == o[1] } } /// Create a key from a given size impl BufferKey { fn get(input: Size2D<uint>) -> BufferKey { BufferKey([input.width, input.height]) } } /// A helper struct to keep track of buffers in the HashMap struct BufferValue { /// An array of buffers, all the same size buffers: Vec<Box<LayerBuffer>>, /// The counter when this size was last requested last_action: uint, } impl BufferMap { // Creates a new BufferMap with a given buffer limit. pub fn new(max_mem: uint) -> BufferMap { BufferMap { map: HashMap::new(), mem: 0u, max_mem: max_mem, counter: 0u, } } /// Insert a new buffer into the map. pub fn insert(&mut self, graphics_context: &NativePaintingGraphicsContext, new_buffer: Box<LayerBuffer>) { let new_key = BufferKey::get(new_buffer.get_size_2d()); // If all our buffers are the same size and we're already at our // memory limit, no need to store this new buffer; just let it drop. if self.mem + new_buffer.get_mem() > self.max_mem && self.map.len() == 1 && self.map.contains_key(&new_key) { new_buffer.destroy(graphics_context); return; } self.mem += new_buffer.get_mem(); // use lazy insertion function to prevent unnecessary allocation let counter = &self.counter; match self.map.entry(new_key) { Occupied(entry) => { entry.into_mut().buffers.push(new_buffer); } Vacant(entry) => { entry.insert(BufferValue { buffers: vec!(new_buffer), last_action: *counter, }); } } let mut opt_key: Option<BufferKey> = None; while self.mem > self.max_mem { let old_key = match opt_key { Some(key) => key, None => { match self.map.iter().min_by(|&(_, x)| x.last_action) { Some((k, _)) => *k, None => panic!("BufferMap: tried to delete with no elements in map"), } } }; if { let list = &mut self.map[old_key].buffers; let condemned_buffer = list.pop().take().unwrap(); self.mem -= condemned_buffer.get_mem(); condemned_buffer.destroy(graphics_context); list.is_empty() } { // then self.map.remove(&old_key); // Don't store empty vectors! opt_key = None; } else { opt_key = Some(old_key); } } } // Try to find a buffer for the given size. pub fn
(&mut self, size: Size2D<uint>) -> Option<Box<LayerBuffer>> { let mut flag = false; // True if key needs to be popped after retrieval. let key = BufferKey::get(size); let ret = match self.map.get_mut(&key) { Some(ref mut buffer_val) => { buffer_val.last_action = self.counter; self.counter += 1; let buffer = buffer_val.buffers.pop().take().unwrap(); self.mem -= buffer.get_mem(); if buffer_val.buffers.is_empty() { flag = true; } Some(buffer) } None => None, }; if flag { self.map.remove(&key); // Don't store empty vectors! } ret } /// Destroys all buffers. pub fn clear(&mut self, graphics_context: &NativePaintingGraphicsContext) { let map = mem::replace(&mut self.map, HashMap::new()); for (_, value) in map.into_iter() { for tile in value.buffers.into_iter() { tile.destroy(graphics_context) } } self.mem = 0 } }
find
identifier_name
buffer_map.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; use geom::size::Size2D; use layers::platform::surface::NativePaintingGraphicsContext; use layers::layers::LayerBuffer; use std::hash::{Hash, Hasher, Writer}; use std::mem; /// This is a struct used to store buffers when they are not in use. /// The paint task can quickly query for a particular size of buffer when it /// needs it.
mem: uint, /// The maximum allowed memory. Unused buffers will be deleted /// when this threshold is exceeded. max_mem: uint, /// A monotonically increasing counter to track how recently tile sizes were used. counter: uint, } /// A key with which to store buffers. It is based on the size of the buffer. #[derive(Eq, Copy)] struct BufferKey([uint; 2]); impl<H: Hasher+Writer> Hash<H> for BufferKey { fn hash(&self, state: &mut H) { let BufferKey(ref bytes) = *self; bytes.as_slice().hash(state); } } impl PartialEq for BufferKey { fn eq(&self, other: &BufferKey) -> bool { let BufferKey(s) = *self; let BufferKey(o) = *other; s[0] == o[0] && s[1] == o[1] } } /// Create a key from a given size impl BufferKey { fn get(input: Size2D<uint>) -> BufferKey { BufferKey([input.width, input.height]) } } /// A helper struct to keep track of buffers in the HashMap struct BufferValue { /// An array of buffers, all the same size buffers: Vec<Box<LayerBuffer>>, /// The counter when this size was last requested last_action: uint, } impl BufferMap { // Creates a new BufferMap with a given buffer limit. pub fn new(max_mem: uint) -> BufferMap { BufferMap { map: HashMap::new(), mem: 0u, max_mem: max_mem, counter: 0u, } } /// Insert a new buffer into the map. pub fn insert(&mut self, graphics_context: &NativePaintingGraphicsContext, new_buffer: Box<LayerBuffer>) { let new_key = BufferKey::get(new_buffer.get_size_2d()); // If all our buffers are the same size and we're already at our // memory limit, no need to store this new buffer; just let it drop. if self.mem + new_buffer.get_mem() > self.max_mem && self.map.len() == 1 && self.map.contains_key(&new_key) { new_buffer.destroy(graphics_context); return; } self.mem += new_buffer.get_mem(); // use lazy insertion function to prevent unnecessary allocation let counter = &self.counter; match self.map.entry(new_key) { Occupied(entry) => { entry.into_mut().buffers.push(new_buffer); } Vacant(entry) => { entry.insert(BufferValue { buffers: vec!(new_buffer), last_action: *counter, }); } } let mut opt_key: Option<BufferKey> = None; while self.mem > self.max_mem { let old_key = match opt_key { Some(key) => key, None => { match self.map.iter().min_by(|&(_, x)| x.last_action) { Some((k, _)) => *k, None => panic!("BufferMap: tried to delete with no elements in map"), } } }; if { let list = &mut self.map[old_key].buffers; let condemned_buffer = list.pop().take().unwrap(); self.mem -= condemned_buffer.get_mem(); condemned_buffer.destroy(graphics_context); list.is_empty() } { // then self.map.remove(&old_key); // Don't store empty vectors! opt_key = None; } else { opt_key = Some(old_key); } } } // Try to find a buffer for the given size. pub fn find(&mut self, size: Size2D<uint>) -> Option<Box<LayerBuffer>> { let mut flag = false; // True if key needs to be popped after retrieval. let key = BufferKey::get(size); let ret = match self.map.get_mut(&key) { Some(ref mut buffer_val) => { buffer_val.last_action = self.counter; self.counter += 1; let buffer = buffer_val.buffers.pop().take().unwrap(); self.mem -= buffer.get_mem(); if buffer_val.buffers.is_empty() { flag = true; } Some(buffer) } None => None, }; if flag { self.map.remove(&key); // Don't store empty vectors! } ret } /// Destroys all buffers. pub fn clear(&mut self, graphics_context: &NativePaintingGraphicsContext) { let map = mem::replace(&mut self.map, HashMap::new()); for (_, value) in map.into_iter() { for tile in value.buffers.into_iter() { tile.destroy(graphics_context) } } self.mem = 0 } }
pub struct BufferMap { /// A HashMap that stores the Buffers. map: HashMap<BufferKey, BufferValue>, /// The current amount of memory stored by the BufferMap's buffers.
random_line_split
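The buffer_map.rs records are written against a pre-1.0 standard library (`uint`, the old `Hash<H>` trait with `Hasher+Writer`). For comparison, a sketch of the same key type against today's `std::hash` API — an illustration, not part of the original Servo file:

```rust
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// BufferKey on modern Rust: usize replaces uint, and the hasher is a
/// type parameter on the method rather than on the trait itself.
#[derive(PartialEq, Eq, Clone, Copy)]
struct BufferKey([usize; 2]);

impl Hash for BufferKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Mirrors bytes.as_slice().hash(state) in the original impl.
        self.0.hash(state);
    }
}

fn main() {
    // Buffers keyed by their pixel dimensions, as in the original map.
    let mut map: HashMap<BufferKey, &str> = HashMap::new();
    map.insert(BufferKey([512, 512]), "tile");
    assert!(map.contains_key(&BufferKey([512, 512])));
}
```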
inflightmap.rs
use std::cmp::Ordering; use std::collections::hash_map::{Entry, RandomState}; use std::collections::{BinaryHeap, HashMap}; use std::fmt; use std::hash::{BuildHasher, Hash}; use std::ops::Deref; // TODO: need a more efficient implementation and possibly more flexibility #[derive(Debug)] pub struct InFlightMap<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher = RandomState> { map: HashMap<K, V, H>, heap: BinaryHeap<Pair<T, K>>, } impl<K: Hash + Eq + Copy + fmt::Debug, V, T: Ord + Copy, H: BuildHasher + Default> InFlightMap<K, V, T, H> { pub fn new() -> Self { InFlightMap { map: Default::default(), heap: Default::default(), } } pub fn clear(&mut self) { self.map.clear(); self.heap.clear(); } pub fn remove(&mut self, key: &K) -> Option<V> { self.map.remove(key) } pub fn entry_with_timeout(&mut self, key: K, expire: T) -> Entry<K, V> { self.heap.push(Pair(expire, key)); self.map.entry(key) } pub fn entry(&mut self, key: K) -> Entry<K, V> { self.map.entry(key) } pub fn insert(&mut self, key: K, value: V, expire: T) -> &mut V { self.heap.push(Pair(expire, key)); let mut inserted = false; let result = self.map.entry(key).or_insert_with(|| { inserted = true; value }); if!inserted { panic!("{:?} is already present in the map", key); } result } pub fn pop_expired(&mut self, now: T) -> Option<(K, V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; self.heap.pop(); if let Some(v) = self.map.remove(&key) { return Some((key, v)); } } } pub fn touch_expired(&mut self, now: T, expire: T) -> Option<(K, &V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; if let Some(v) = self.map.get(&key) { *self.heap.peek_mut().unwrap() = Pair(expire, key); return Some((key, &v)); } else { self.heap.pop(); } } } } impl<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher> Deref for InFlightMap<K, V, T, H> { type Target = HashMap<K, V, H>; fn deref(&self) -> &Self::Target { &self.map } } // Like a 2-tuple but comparison is only done for the first item #[derive(Debug)] struct Pair<T, V>(T, V); impl<T: PartialEq, V> PartialEq<Pair<T, V>> for Pair<T, V> { fn eq(&self, other: &Pair<T, V>) -> bool
} impl<T: Eq, V> Eq for Pair<T, V> {} impl<T: PartialOrd, V> PartialOrd<Pair<T, V>> for Pair<T, V> { fn partial_cmp(&self, other: &Pair<T, V>) -> Option<Ordering> { other.0.partial_cmp(&self.0) } } impl<T: Ord, V> Ord for Pair<T, V> { fn cmp(&self, other: &Pair<T, V>) -> Ordering { other.0.cmp(&self.0) } }
{ other.0.eq(&self.0) }
identifier_body
inflightmap.rs
use std::cmp::Ordering; use std::collections::hash_map::{Entry, RandomState}; use std::collections::{BinaryHeap, HashMap}; use std::fmt; use std::hash::{BuildHasher, Hash}; use std::ops::Deref; // TODO: need a more efficient implementation and possibly more flexibility #[derive(Debug)] pub struct InFlightMap<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher = RandomState> { map: HashMap<K, V, H>, heap: BinaryHeap<Pair<T, K>>, } impl<K: Hash + Eq + Copy + fmt::Debug, V, T: Ord + Copy, H: BuildHasher + Default> InFlightMap<K, V, T, H> { pub fn new() -> Self { InFlightMap { map: Default::default(), heap: Default::default(), } } pub fn clear(&mut self) { self.map.clear(); self.heap.clear(); } pub fn
(&mut self, key: &K) -> Option<V> { self.map.remove(key) } pub fn entry_with_timeout(&mut self, key: K, expire: T) -> Entry<K, V> { self.heap.push(Pair(expire, key)); self.map.entry(key) } pub fn entry(&mut self, key: K) -> Entry<K, V> { self.map.entry(key) } pub fn insert(&mut self, key: K, value: V, expire: T) -> &mut V { self.heap.push(Pair(expire, key)); let mut inserted = false; let result = self.map.entry(key).or_insert_with(|| { inserted = true; value }); if!inserted { panic!("{:?} is already present in the map", key); } result } pub fn pop_expired(&mut self, now: T) -> Option<(K, V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; self.heap.pop(); if let Some(v) = self.map.remove(&key) { return Some((key, v)); } } } pub fn touch_expired(&mut self, now: T, expire: T) -> Option<(K, &V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; if let Some(v) = self.map.get(&key) { *self.heap.peek_mut().unwrap() = Pair(expire, key); return Some((key, &v)); } else { self.heap.pop(); } } } } impl<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher> Deref for InFlightMap<K, V, T, H> { type Target = HashMap<K, V, H>; fn deref(&self) -> &Self::Target { &self.map } } // Like a 2-tuple but comparison is only done for the first item #[derive(Debug)] struct Pair<T, V>(T, V); impl<T: PartialEq, V> PartialEq<Pair<T, V>> for Pair<T, V> { fn eq(&self, other: &Pair<T, V>) -> bool { other.0.eq(&self.0) } } impl<T: Eq, V> Eq for Pair<T, V> {} impl<T: PartialOrd, V> PartialOrd<Pair<T, V>> for Pair<T, V> { fn partial_cmp(&self, other: &Pair<T, V>) -> Option<Ordering> { other.0.partial_cmp(&self.0) } } impl<T: Ord, V> Ord for Pair<T, V> { fn cmp(&self, other: &Pair<T, V>) -> Ordering { other.0.cmp(&self.0) } }
remove
identifier_name
inflightmap.rs
use std::cmp::Ordering; use std::collections::hash_map::{Entry, RandomState}; use std::collections::{BinaryHeap, HashMap}; use std::fmt; use std::hash::{BuildHasher, Hash}; use std::ops::Deref; // TODO: need a more efficient implementation and possibly more flexibility #[derive(Debug)] pub struct InFlightMap<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher = RandomState> { map: HashMap<K, V, H>, heap: BinaryHeap<Pair<T, K>>, } impl<K: Hash + Eq + Copy + fmt::Debug, V, T: Ord + Copy, H: BuildHasher + Default> InFlightMap<K, V, T, H> { pub fn new() -> Self { InFlightMap { map: Default::default(), heap: Default::default(), } } pub fn clear(&mut self) { self.map.clear(); self.heap.clear(); }
} pub fn entry_with_timeout(&mut self, key: K, expire: T) -> Entry<K, V> { self.heap.push(Pair(expire, key)); self.map.entry(key) } pub fn entry(&mut self, key: K) -> Entry<K, V> { self.map.entry(key) } pub fn insert(&mut self, key: K, value: V, expire: T) -> &mut V { self.heap.push(Pair(expire, key)); let mut inserted = false; let result = self.map.entry(key).or_insert_with(|| { inserted = true; value }); if!inserted { panic!("{:?} is already present in the map", key); } result } pub fn pop_expired(&mut self, now: T) -> Option<(K, V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; self.heap.pop(); if let Some(v) = self.map.remove(&key) { return Some((key, v)); } } } pub fn touch_expired(&mut self, now: T, expire: T) -> Option<(K, &V)> { loop { let key = match self.heap.peek() { Some(&Pair(e, k)) if now >= e => k, _ => return None, }; if let Some(v) = self.map.get(&key) { *self.heap.peek_mut().unwrap() = Pair(expire, key); return Some((key, &v)); } else { self.heap.pop(); } } } } impl<K: Hash + Eq + Copy, V, T: Ord + Copy, H: BuildHasher> Deref for InFlightMap<K, V, T, H> { type Target = HashMap<K, V, H>; fn deref(&self) -> &Self::Target { &self.map } } // Like a 2-tuple but comparison is only done for the first item #[derive(Debug)] struct Pair<T, V>(T, V); impl<T: PartialEq, V> PartialEq<Pair<T, V>> for Pair<T, V> { fn eq(&self, other: &Pair<T, V>) -> bool { other.0.eq(&self.0) } } impl<T: Eq, V> Eq for Pair<T, V> {} impl<T: PartialOrd, V> PartialOrd<Pair<T, V>> for Pair<T, V> { fn partial_cmp(&self, other: &Pair<T, V>) -> Option<Ordering> { other.0.partial_cmp(&self.0) } } impl<T: Ord, V> Ord for Pair<T, V> { fn cmp(&self, other: &Pair<T, V>) -> Ordering { other.0.cmp(&self.0) } }
pub fn remove(&mut self, key: &K) -> Option<V> { self.map.remove(key)
random_line_split
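`InFlightMap` accepts any `T: Ord + Copy` as its deadline type, and `std::time::Instant` satisfies that bound. A hypothetical usage sketch — it assumes the `InFlightMap` definition shown above is in scope and is not taken from the crate's own tests:

```rust
use std::time::{Duration, Instant};

fn main() {
    let mut inflight: InFlightMap<u32, &str, Instant> = InFlightMap::new();
    let now = Instant::now();
    let deadline = now + Duration::from_secs(5);
    inflight.insert(1, "ping", deadline);

    // Nothing has reached its deadline yet.
    assert!(inflight.pop_expired(now).is_none());

    // Once the clock passes the deadline, the entry is returned and removed.
    assert_eq!(inflight.pop_expired(deadline), Some((1, "ping")));
    assert!(inflight.is_empty()); // via the Deref to HashMap
}
```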
list_documents.rs
use crate::domain::model::error::Error as ModelError; use crate::domain::ports::secondary::list::{List, Parameters}; use async_trait::async_trait; use common::document::ContainerDocument; use futures::stream::{Stream, StreamExt}; use std::pin::Pin; use tracing::info_span; use tracing_futures::Instrument; type PinnedStream<T> = Pin<Box<dyn Stream<Item = T> + Send +'static>>; #[async_trait] pub trait ListDocuments<D> { async fn list_documents(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError>; } #[async_trait] impl<D, T> ListDocuments<D> for T where D: ContainerDocument + Send + Sync +'static, T: List<D> + Send + Sync, { async fn
(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError> { let doc_type = D::static_doc_type().to_string(); let documents = self .list_documents(Parameters { doc_type }) .await? .map(|raw| raw.map_err(|err| ModelError::DocumentRetrievalError { source: err.into() })) .instrument(info_span!( "List documents", doc_type = D::static_doc_type(), )); Ok(documents.boxed()) } }
list_documents
identifier_name
list_documents.rs
use crate::domain::model::error::Error as ModelError; use crate::domain::ports::secondary::list::{List, Parameters}; use async_trait::async_trait; use common::document::ContainerDocument;
use tracing_futures::Instrument; type PinnedStream<T> = Pin<Box<dyn Stream<Item = T> + Send +'static>>; #[async_trait] pub trait ListDocuments<D> { async fn list_documents(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError>; } #[async_trait] impl<D, T> ListDocuments<D> for T where D: ContainerDocument + Send + Sync +'static, T: List<D> + Send + Sync, { async fn list_documents(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError> { let doc_type = D::static_doc_type().to_string(); let documents = self .list_documents(Parameters { doc_type }) .await? .map(|raw| raw.map_err(|err| ModelError::DocumentRetrievalError { source: err.into() })) .instrument(info_span!( "List documents", doc_type = D::static_doc_type(), )); Ok(documents.boxed()) } }
use futures::stream::{Stream, StreamExt}; use std::pin::Pin; use tracing::info_span;
random_line_split
list_documents.rs
use crate::domain::model::error::Error as ModelError; use crate::domain::ports::secondary::list::{List, Parameters}; use async_trait::async_trait; use common::document::ContainerDocument; use futures::stream::{Stream, StreamExt}; use std::pin::Pin; use tracing::info_span; use tracing_futures::Instrument; type PinnedStream<T> = Pin<Box<dyn Stream<Item = T> + Send +'static>>; #[async_trait] pub trait ListDocuments<D> { async fn list_documents(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError>; } #[async_trait] impl<D, T> ListDocuments<D> for T where D: ContainerDocument + Send + Sync +'static, T: List<D> + Send + Sync, { async fn list_documents(&self) -> Result<PinnedStream<Result<D, ModelError>>, ModelError>
}
{ let doc_type = D::static_doc_type().to_string(); let documents = self .list_documents(Parameters { doc_type }) .await? .map(|raw| raw.map_err(|err| ModelError::DocumentRetrievalError { source: err.into() })) .instrument(info_span!( "List documents", doc_type = D::static_doc_type(), )); Ok(documents.boxed()) }
identifier_body
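Through the blanket impl above, anything implementing the `List<D>` port can be consumed as a typed stream of documents. A rough consumer sketch — it assumes the trait, `ModelError`, and the `futures` crate are in scope, and the counting logic is purely illustrative rather than part of the crate:

```rust
use futures::stream::StreamExt;

/// Drain the document stream and count the items, propagating the first
/// per-document retrieval error encountered.
async fn count_documents<D, T: ListDocuments<D>>(index: &T) -> Result<usize, ModelError> {
    let mut stream = index.list_documents().await?;
    let mut count = 0;
    while let Some(doc) = stream.next().await {
        let _doc: D = doc?; // surface DocumentRetrievalError early
        count += 1;
    }
    Ok(count)
}
```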
equals.rs
#![feature(core)] extern crate core; #[cfg(test)] mod tests { use core::iter::Iterator;
use core::cmp::Eq; struct A<T: Eq> { begin: T, end: T } macro_rules! Iterator_impl { ($T:ty) => { impl Iterator for A<$T> { type Item = $T; fn next(&mut self) -> Option<Self::Item> { if self.begin < self.end { let result = self.begin; self.begin = self.begin.wrapping_add(1); Some::<Self::Item>(result) } else { None::<Self::Item> } } } } } // pub fn equals<A, L, R>(mut a: L, mut b: R) -> bool where // A: Eq, // L: Iterator<Item=A>, // R: Iterator<Item=A>, // { // loop { // match (a.next(), b.next()) { // (None, None) => return true, // (None, _) | (_, None) => return false, // (Some(x), Some(y)) => if x!= y { return false }, // } // } // } type T = i32; Iterator_impl!(T); type AA = T; type L = A<T>; type R = A<T>; #[test] fn equals_test1() { let a: L = L { begin: 0, end: 10 }; let b: R = R { begin: 0, end: 10 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, true); } #[test] fn equals_test2() { let a: L = L { begin: 0, end: 10 }; let b: R = R { begin: 0, end: 11 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, false); } #[test] fn equals_test3() { let a: L = L { begin: 0, end: 0 }; let b: R = R { begin: 0, end: 0 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, true); } }
use core::iter::order::equals;
random_line_split
equals.rs
#![feature(core)] extern crate core; #[cfg(test)] mod tests { use core::iter::Iterator; use core::iter::order::equals; use core::cmp::Eq; struct A<T: Eq> { begin: T, end: T } macro_rules! Iterator_impl { ($T:ty) => { impl Iterator for A<$T> { type Item = $T; fn next(&mut self) -> Option<Self::Item> { if self.begin < self.end { let result = self.begin; self.begin = self.begin.wrapping_add(1); Some::<Self::Item>(result) } else { None::<Self::Item> } } } } } // pub fn equals<A, L, R>(mut a: L, mut b: R) -> bool where // A: Eq, // L: Iterator<Item=A>, // R: Iterator<Item=A>, // { // loop { // match (a.next(), b.next()) { // (None, None) => return true, // (None, _) | (_, None) => return false, // (Some(x), Some(y)) => if x!= y { return false }, // } // } // } type T = i32; Iterator_impl!(T); type AA = T; type L = A<T>; type R = A<T>; #[test] fn
() { let a: L = L { begin: 0, end: 10 }; let b: R = R { begin: 0, end: 10 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, true); } #[test] fn equals_test2() { let a: L = L { begin: 0, end: 10 }; let b: R = R { begin: 0, end: 11 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, false); } #[test] fn equals_test3() { let a: L = L { begin: 0, end: 0 }; let b: R = R { begin: 0, end: 0 }; let result: bool = equals::<AA, L, R>(a, b); assert_eq!(result, true); } }
equals_test1
identifier_name
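The `core::iter::order::equals` helper exercised in the equals.rs records was an unstable API that has since been removed from the standard library; on current Rust the same element-wise comparison is `Iterator::eq`. A stable-Rust equivalent of the first two test cases (written in UFCS form to keep the call unambiguous):

```rust
fn main() {
    // equals_test1: identical sequences compare equal element by element.
    assert!(Iterator::eq(0..10, 0..10));
    // equals_test2: the longer right-hand sequence breaks the equality.
    assert!(!Iterator::eq(0..10, 0..11));
}
```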
issue-13808.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unknown_features)]
} impl<'a> Foo<'a> { fn new<F>(listener: F) -> Foo<'a> where F: FnMut() + 'a { Foo { listener: box listener } } } fn main() { let a = Foo::new(|| {}); }
#![feature(box_syntax)] struct Foo<'a> { listener: Box<FnMut() + 'a>,
random_line_split
issue-13808.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unknown_features)] #![feature(box_syntax)] struct Foo<'a> { listener: Box<FnMut() + 'a>, } impl<'a> Foo<'a> { fn new<F>(listener: F) -> Foo<'a> where F: FnMut() + 'a { Foo { listener: box listener } } } fn main()
{ let a = Foo::new(|| {}); }
identifier_body
issue-13808.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unknown_features)] #![feature(box_syntax)] struct
<'a> { listener: Box<FnMut() + 'a>, } impl<'a> Foo<'a> { fn new<F>(listener: F) -> Foo<'a> where F: FnMut() + 'a { Foo { listener: box listener } } } fn main() { let a = Foo::new(|| {}); }
Foo
identifier_name
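The issue-13808.rs test leans on the `box_syntax` feature, which never stabilized and was later removed; on stable Rust the same pattern spells the allocation with `Box::new` and the trait object with `dyn`. A sketch of that stable form:

```rust
struct Foo<'a> {
    listener: Box<dyn FnMut() + 'a>,
}

impl<'a> Foo<'a> {
    fn new<F: FnMut() + 'a>(listener: F) -> Foo<'a> {
        Foo { listener: Box::new(listener) }
    }
}

fn main() {
    let _a = Foo::new(|| {});
}
```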
workspace.rs
use crate::config::GeneralConfig; use crate::core::stack::Stack; use crate::layout::{Layout, LayoutMessage}; use crate::window_system::{Window, WindowSystem}; /// Represents a single workspace with a `tag` (name), /// `id`, a `layout` and a `stack` for all windows pub struct Workspace { pub id: u32, pub tag: String, pub layout: Box<dyn Layout>, pub stack: Option<Stack<Window>>, } impl Clone for Workspace { fn clone(&self) -> Workspace { Workspace { id: self.id, tag: self.tag.clone(), layout: self.layout.copy(), stack: self.stack.clone(), } } } impl Workspace { /// Create a new workspace pub fn new( id: u32, tag: String, layout: Box<dyn Layout>, stack: Option<Stack<Window>>, ) -> Workspace { Workspace { id, tag, layout, stack, } } /// Add a new window to the workspace by adding it to the stack. /// If the stack doesn't exist yet, create one. pub fn add(&self, window: Window) -> Workspace { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some( self.stack .clone() .map_or(Stack::from_element(window), |s| s.add(window)), ), ) } /// Returns the number of windows contained in this workspace pub fn
(&self) -> usize { self.stack.clone().map_or(0usize, |x| x.len()) } pub fn is_empty(&self) -> bool { self.len() == 0 } /// Checks if the workspace contains the given window pub fn contains(&self, window: Window) -> bool { self.stack.clone().map_or(false, |x| x.contains(window)) } pub fn windows(&self) -> Vec<Window> { self.stack.clone().map_or(Vec::new(), |s| s.integrate()) } pub fn peek(&self) -> Option<Window> { self.stack.clone().map(|s| s.focus) } pub fn map<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().map(|x| f(x)), ) } pub fn map_option<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Option<Stack<Window>>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().and_then(|x| f(x)), ) } pub fn map_or<F>(&self, default: Stack<Window>, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some(self.stack.clone().map_or(default, |x| f(x))), ) } pub fn send_layout_message( &self, message: LayoutMessage, window_system: &dyn WindowSystem, config: &GeneralConfig, ) -> Workspace { let mut layout = self.layout.copy(); layout.apply_message(message, window_system, &self.stack, config); Workspace::new(self.id, self.tag.clone(), layout, self.stack.clone()) } }
len
identifier_name
workspace.rs
use crate::config::GeneralConfig; use crate::core::stack::Stack; use crate::layout::{Layout, LayoutMessage}; use crate::window_system::{Window, WindowSystem}; /// Represents a single workspace with a `tag` (name), /// `id`, a `layout` and a `stack` for all windows pub struct Workspace { pub id: u32, pub tag: String, pub layout: Box<dyn Layout>, pub stack: Option<Stack<Window>>, } impl Clone for Workspace { fn clone(&self) -> Workspace { Workspace { id: self.id, tag: self.tag.clone(), layout: self.layout.copy(), stack: self.stack.clone(), } } } impl Workspace { /// Create a new workspace pub fn new( id: u32, tag: String, layout: Box<dyn Layout>, stack: Option<Stack<Window>>, ) -> Workspace { Workspace { id, tag, layout, stack, } } /// Add a new window to the workspace by adding it to the stack. /// If the stack doesn't exist yet, create one. pub fn add(&self, window: Window) -> Workspace { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some( self.stack .clone() .map_or(Stack::from_element(window), |s| s.add(window)), ), ) } /// Returns the number of windows contained in this workspace pub fn len(&self) -> usize { self.stack.clone().map_or(0usize, |x| x.len()) } pub fn is_empty(&self) -> bool { self.len() == 0 } /// Checks if the workspace contains the given window pub fn contains(&self, window: Window) -> bool { self.stack.clone().map_or(false, |x| x.contains(window)) } pub fn windows(&self) -> Vec<Window> { self.stack.clone().map_or(Vec::new(), |s| s.integrate()) } pub fn peek(&self) -> Option<Window> { self.stack.clone().map(|s| s.focus) } pub fn map<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().map(|x| f(x)), ) } pub fn map_option<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Option<Stack<Window>>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().and_then(|x| f(x)), ) }
pub fn map_or<F>(&self, default: Stack<Window>, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some(self.stack.clone().map_or(default, |x| f(x))), ) } pub fn send_layout_message( &self, message: LayoutMessage, window_system: &dyn WindowSystem, config: &GeneralConfig, ) -> Workspace { let mut layout = self.layout.copy(); layout.apply_message(message, window_system, &self.stack, config); Workspace::new(self.id, self.tag.clone(), layout, self.stack.clone()) } }
random_line_split
workspace.rs
use crate::config::GeneralConfig; use crate::core::stack::Stack; use crate::layout::{Layout, LayoutMessage}; use crate::window_system::{Window, WindowSystem}; /// Represents a single workspace with a `tag` (name), /// `id`, a `layout` and a `stack` for all windows pub struct Workspace { pub id: u32, pub tag: String, pub layout: Box<dyn Layout>, pub stack: Option<Stack<Window>>, } impl Clone for Workspace { fn clone(&self) -> Workspace { Workspace { id: self.id, tag: self.tag.clone(), layout: self.layout.copy(), stack: self.stack.clone(), } } } impl Workspace { /// Create a new workspace pub fn new( id: u32, tag: String, layout: Box<dyn Layout>, stack: Option<Stack<Window>>, ) -> Workspace { Workspace { id, tag, layout, stack, } } /// Add a new window to the workspace by adding it to the stack. /// If the stack doesn't exist yet, create one. pub fn add(&self, window: Window) -> Workspace
/// Returns the number of windows contained in this workspace pub fn len(&self) -> usize { self.stack.clone().map_or(0usize, |x| x.len()) } pub fn is_empty(&self) -> bool { self.len() == 0 } /// Checks if the workspace contains the given window pub fn contains(&self, window: Window) -> bool { self.stack.clone().map_or(false, |x| x.contains(window)) } pub fn windows(&self) -> Vec<Window> { self.stack.clone().map_or(Vec::new(), |s| s.integrate()) } pub fn peek(&self) -> Option<Window> { self.stack.clone().map(|s| s.focus) } pub fn map<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().map(|x| f(x)), ) } pub fn map_option<F>(&self, f: F) -> Workspace where F: Fn(Stack<Window>) -> Option<Stack<Window>>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), self.stack.clone().and_then(|x| f(x)), ) } pub fn map_or<F>(&self, default: Stack<Window>, f: F) -> Workspace where F: Fn(Stack<Window>) -> Stack<Window>, { Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some(self.stack.clone().map_or(default, |x| f(x))), ) } pub fn send_layout_message( &self, message: LayoutMessage, window_system: &dyn WindowSystem, config: &GeneralConfig, ) -> Workspace { let mut layout = self.layout.copy(); layout.apply_message(message, window_system, &self.stack, config); Workspace::new(self.id, self.tag.clone(), layout, self.stack.clone()) } }
{ Workspace::new( self.id, self.tag.clone(), self.layout.copy(), Some( self.stack .clone() .map_or(Stack::from_element(window), |s| s.add(window)), ), ) }
identifier_body
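Workspace implements `Clone` by hand because a `Box<dyn Layout>` field cannot be derived; the layout's `copy()` method acts as an object-safe clone. The general shape of that pattern, sketched with a stand-in trait and type rather than the crate's real `Layout`:

```rust
/// An object-safe "clone" for trait objects: the trait exposes a boxed
/// copy of itself, and Clone for the boxed form delegates to it.
trait Layout {
    fn copy(&self) -> Box<dyn Layout>;
}

impl Clone for Box<dyn Layout> {
    fn clone(&self) -> Self {
        self.copy()
    }
}

// With that impl in place, a struct holding the boxed trait object can
// simply derive Clone instead of writing it out field by field.
#[derive(Clone)]
struct Workspace {
    layout: Box<dyn Layout>,
}

struct Full; // illustrative layout
impl Layout for Full {
    fn copy(&self) -> Box<dyn Layout> { Box::new(Full) }
}

fn main() {
    let ws = Workspace { layout: Box::new(Full) };
    let _ws2 = ws.clone();
}
```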
protocol.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Low-level wire protocol implementation. Currently only supports //! [JSON packets] //! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets). use rustc_serialize::{json, Encodable}; use rustc_serialize::json::Json; use rustc_serialize::json::ParserError::{IoError, SyntaxError}; use std::error::Error; use std::io::{Read, Write}; use std::net::TcpStream; pub trait JsonPacketStream { fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T); fn read_json_packet(&mut self) -> Result<Option<Json>, String>; } impl JsonPacketStream for TcpStream { fn
<'a, T: Encodable>(&mut self, obj: &T) { let s = json::encode(obj).unwrap().replace("__type__", "type"); println!("<- {}", s); self.write_all(s.len().to_string().as_bytes()).unwrap(); self.write_all(&[':' as u8]).unwrap(); self.write_all(s.as_bytes()).unwrap(); } fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String> { // https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport // In short, each JSON packet is [ascii length]:[JSON data of given length] let mut buffer = vec!(); loop { let mut buf = [0]; let byte = match self.read(&mut buf) { Ok(0) => return Ok(None), // EOF Ok(1) => buf[0], Ok(_) => unreachable!(), Err(e) => return Err(e.description().to_string()), }; match byte { b':' => { let packet_len_str = match String::from_utf8(buffer) { Ok(packet_len) => packet_len, Err(_) => return Err("nonvalid UTF8 in packet length".to_string()), }; let packet_len = match u64::from_str_radix(&packet_len_str, 10) { Ok(packet_len) => packet_len, Err(_) => return Err("packet length missing / not parsable".to_string()), }; let mut packet = String::new(); self.take(packet_len).read_to_string(&mut packet).unwrap(); println!("{}", packet); return match Json::from_str(&packet) { Ok(json) => Ok(Some(json)), Err(err) => match err { IoError(ioerr) => return Err(ioerr.description().to_string()), SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)), }, }; }, c => buffer.push(c), } } } }
write_json_packet
identifier_name
protocol.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Low-level wire protocol implementation. Currently only supports //! [JSON packets] //! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets). use rustc_serialize::{json, Encodable}; use rustc_serialize::json::Json; use rustc_serialize::json::ParserError::{IoError, SyntaxError}; use std::error::Error; use std::io::{Read, Write}; use std::net::TcpStream; pub trait JsonPacketStream { fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T); fn read_json_packet(&mut self) -> Result<Option<Json>, String>; } impl JsonPacketStream for TcpStream { fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T) { let s = json::encode(obj).unwrap().replace("__type__", "type"); println!("<- {}", s); self.write_all(s.len().to_string().as_bytes()).unwrap(); self.write_all(&[':' as u8]).unwrap(); self.write_all(s.as_bytes()).unwrap(); } fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String> { // https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport // In short, each JSON packet is [ascii length]:[JSON data of given length] let mut buffer = vec!(); loop { let mut buf = [0]; let byte = match self.read(&mut buf) { Ok(0) => return Ok(None), // EOF Ok(1) => buf[0], Ok(_) => unreachable!(), Err(e) => return Err(e.description().to_string()), }; match byte { b':' => {
let packet_len = match u64::from_str_radix(&packet_len_str, 10) { Ok(packet_len) => packet_len, Err(_) => return Err("packet length missing / not parsable".to_string()), }; let mut packet = String::new(); self.take(packet_len).read_to_string(&mut packet).unwrap(); println!("{}", packet); return match Json::from_str(&packet) { Ok(json) => Ok(Some(json)), Err(err) => match err { IoError(ioerr) => return Err(ioerr.description().to_string()), SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)), }, }; }, c => buffer.push(c), } } } }
let packet_len_str = match String::from_utf8(buffer) { Ok(packet_len) => packet_len, Err(_) => return Err("nonvalid UTF8 in packet length".to_string()), };
random_line_split
protocol.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Low-level wire protocol implementation. Currently only supports //! [JSON packets] //! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets). use rustc_serialize::{json, Encodable}; use rustc_serialize::json::Json; use rustc_serialize::json::ParserError::{IoError, SyntaxError}; use std::error::Error; use std::io::{Read, Write}; use std::net::TcpStream; pub trait JsonPacketStream { fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T); fn read_json_packet(&mut self) -> Result<Option<Json>, String>; } impl JsonPacketStream for TcpStream { fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T) { let s = json::encode(obj).unwrap().replace("__type__", "type"); println!("<- {}", s); self.write_all(s.len().to_string().as_bytes()).unwrap(); self.write_all(&[':' as u8]).unwrap(); self.write_all(s.as_bytes()).unwrap(); } fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String>
Err(_) => return Err("packet length missing / not parsable".to_string()), }; let mut packet = String::new(); self.take(packet_len).read_to_string(&mut packet).unwrap(); println!("{}", packet); return match Json::from_str(&packet) { Ok(json) => Ok(Some(json)), Err(err) => match err { IoError(ioerr) => return Err(ioerr.description().to_string()), SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)), }, }; }, c => buffer.push(c), } } } }
{ // https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport // In short, each JSON packet is [ascii length]:[JSON data of given length] let mut buffer = vec!(); loop { let mut buf = [0]; let byte = match self.read(&mut buf) { Ok(0) => return Ok(None), // EOF Ok(1) => buf[0], Ok(_) => unreachable!(), Err(e) => return Err(e.description().to_string()), }; match byte { b':' => { let packet_len_str = match String::from_utf8(buffer) { Ok(packet_len) => packet_len, Err(_) => return Err("nonvalid UTF8 in packet length".to_string()), }; let packet_len = match u64::from_str_radix(&packet_len_str, 10) { Ok(packet_len) => packet_len,
identifier_body
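The wire format written by `write_json_packet` above is simply the decimal byte length of the JSON text, a colon, then the text itself. A standalone sketch of that framing step:

```rust
/// Frame a JSON payload the way the stream transport above expects:
/// "<decimal byte length>:<json>".
fn frame_packet(json: &str) -> String {
    format!("{}:{}", json.len(), json)
}

fn main() {
    assert_eq!(frame_packet(r#"{"to":"root"}"#), r#"13:{"to":"root"}"#);
}
```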
cannot-mutate-captured-non-mut-var.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-tidy-linelength // revisions: ast mir //[mir]compile-flags: -Z borrowck=mir #![feature(unboxed_closures)] use std::io::Read; fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f } fn
() { let x = 1; to_fn_once(move|| { x = 2; }); //[ast]~^ ERROR: cannot assign to immutable captured outer variable //[mir]~^^ ERROR: cannot assign to `x`, as it is not declared as mutable let s = std::io::stdin(); to_fn_once(move|| { s.read_to_end(&mut Vec::new()); }); //[ast]~^ ERROR: cannot borrow immutable captured outer variable //[mir]~^^ ERROR: cannot borrow `s` as mutable, as it is not declared as mutable }
main
identifier_name
cannot-mutate-captured-non-mut-var.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-tidy-linelength // revisions: ast mir //[mir]compile-flags: -Z borrowck=mir #![feature(unboxed_closures)] use std::io::Read; fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f } fn main() {
//[ast]~^ ERROR: cannot assign to immutable captured outer variable //[mir]~^^ ERROR: cannot assign to `x`, as it is not declared as mutable let s = std::io::stdin(); to_fn_once(move|| { s.read_to_end(&mut Vec::new()); }); //[ast]~^ ERROR: cannot borrow immutable captured outer variable //[mir]~^^ ERROR: cannot borrow `s` as mutable, as it is not declared as mutable }
let x = 1; to_fn_once(move|| { x = 2; });
random_line_split
cannot-mutate-captured-non-mut-var.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-tidy-linelength // revisions: ast mir //[mir]compile-flags: -Z borrowck=mir #![feature(unboxed_closures)] use std::io::Read; fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F
fn main() { let x = 1; to_fn_once(move|| { x = 2; }); //[ast]~^ ERROR: cannot assign to immutable captured outer variable //[mir]~^^ ERROR: cannot assign to `x`, as it is not declared as mutable let s = std::io::stdin(); to_fn_once(move|| { s.read_to_end(&mut Vec::new()); }); //[ast]~^ ERROR: cannot borrow immutable captured outer variable //[mir]~^^ ERROR: cannot borrow `s` as mutable, as it is not declared as mutable }
{ f }
identifier_body
main.rs
/* Aurélien DESBRIÈRES aurelien(at)hackers(dot)camp License GNU GPL latest */ // Rust experimentations // Modules Struct Visibility in Rust mod my { // A public struct with a public field of generic type `T` pub struct WhiteBox<T> { pub contents: T, } // A public struct with a private field of generic type `T` #[allow(dead_code)] pub struct Bl
> { contents: T, } impl<T> BlackBox<T> { // A public constructor method pub fn new(contents: T) -> BlackBox<T> { BlackBox { contents: contents, } } } } fn main() { // Public structs with public fields can be constructed as usual let white_box = my::WhiteBox { contents: "public information" }; // and their fields can be normally accessed. println!("The white box contains: {}", white_box.contents); // Public structs with private fields cannot be constructed using field names. // Error! `BlackBox` has private fields //let black_box = my::BlackBox { contents: "classified information" }; // TODO ^ Try uncommenting this line // However, structs with private fields can be created using // public constructors let _black_box = my::BlackBox::new("classified information"); // and the private fields of a public struct cannot be accessed. // Error! The `contents` field is private //println!("The black box contains: {}", _black_box.contents); // TODO ^ Try uncommenting this line }
ackBox<T
identifier_name
main.rs
/* Aurélien DESBRIÈRES aurelien(at)hackers(dot)camp License GNU GPL latest */ // Rust experimentations // Modules Struct Visibility in Rust mod my { // A public struct with a public field of generic type `T` pub struct WhiteBox<T> { pub contents: T, } // A public struct with a private field of generic type `T` #[allow(dead_code)] pub struct BlackBox<T> { contents: T, }
impl<T> BlackBox<T> { // A public constructor method pub fn new(contents: T) -> BlackBox<T> { BlackBox { contents: contents, } } } } fn main() { // Public structs with public fields can be constructed as usual let white_box = my::WhiteBox { contents: "public information" }; // and their fields can be normally accessed. println!("The white box contains: {}", white_box.contents); // Public structs with private fields cannot be constructed using field names. // Error! `BlackBox` has private fields //let black_box = my::BlackBox { contents: "classified information" }; // TODO ^ Try uncommenting this line // However, structs with private fields can be created using // public constructors let _black_box = my::BlackBox::new("classified information"); // and the private fields of a public struct cannot be accessed. // Error! The `contents` field is private //println!("The black box contains: {}", _black_box.contents); // TODO ^ Try uncommenting this line }
random_line_split
radio_tool_button.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use Actionable; use Bin; use Buildable; use Container; use RadioButton; use ToggleToolButton; use ToolButton; use ToolItem; use Widget; use ffi; use glib::object::Cast; use glib::object::IsA; use glib::translate::*; use std::fmt; glib_wrapper! { pub struct RadioToolButton(Object<ffi::GtkRadioToolButton, ffi::GtkRadioToolButtonClass, RadioToolButtonClass>) @extends ToggleToolButton, ToolButton, ToolItem, Bin, Container, Widget, @implements Buildable, Actionable; match fn { get_type => || ffi::gtk_radio_tool_button_get_type(), } } impl RadioToolButton { pub fn new_from_widget<P: IsA<RadioToolButton>>(group: &P) -> RadioToolButton
} pub const NONE_RADIO_TOOL_BUTTON: Option<&RadioToolButton> = None; pub trait RadioToolButtonExt:'static { fn get_group(&self) -> Vec<RadioButton>; } impl<O: IsA<RadioToolButton>> RadioToolButtonExt for O { fn get_group(&self) -> Vec<RadioButton> { unsafe { FromGlibPtrContainer::from_glib_none(ffi::gtk_radio_tool_button_get_group(self.as_ref().to_glib_none().0)) } } } impl fmt::Display for RadioToolButton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "RadioToolButton") } }
{ skip_assert_initialized!(); unsafe { ToolItem::from_glib_none(ffi::gtk_radio_tool_button_new_from_widget(group.as_ref().to_glib_none().0)).unsafe_cast() } }
identifier_body
radio_tool_button.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use Actionable; use Bin; use Buildable; use Container; use RadioButton; use ToggleToolButton; use ToolButton; use ToolItem; use Widget; use ffi; use glib::object::Cast; use glib::object::IsA; use glib::translate::*; use std::fmt; glib_wrapper! { pub struct RadioToolButton(Object<ffi::GtkRadioToolButton, ffi::GtkRadioToolButtonClass, RadioToolButtonClass>) @extends ToggleToolButton, ToolButton, ToolItem, Bin, Container, Widget, @implements Buildable, Actionable; match fn { get_type => || ffi::gtk_radio_tool_button_get_type(), } } impl RadioToolButton { pub fn new_from_widget<P: IsA<RadioToolButton>>(group: &P) -> RadioToolButton { skip_assert_initialized!(); unsafe {
pub const NONE_RADIO_TOOL_BUTTON: Option<&RadioToolButton> = None; pub trait RadioToolButtonExt:'static { fn get_group(&self) -> Vec<RadioButton>; } impl<O: IsA<RadioToolButton>> RadioToolButtonExt for O { fn get_group(&self) -> Vec<RadioButton> { unsafe { FromGlibPtrContainer::from_glib_none(ffi::gtk_radio_tool_button_get_group(self.as_ref().to_glib_none().0)) } } } impl fmt::Display for RadioToolButton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "RadioToolButton") } }
ToolItem::from_glib_none(ffi::gtk_radio_tool_button_new_from_widget(group.as_ref().to_glib_none().0)).unsafe_cast() } } }
random_line_split
radio_tool_button.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use Actionable; use Bin; use Buildable; use Container; use RadioButton; use ToggleToolButton; use ToolButton; use ToolItem; use Widget; use ffi; use glib::object::Cast; use glib::object::IsA; use glib::translate::*; use std::fmt; glib_wrapper! { pub struct RadioToolButton(Object<ffi::GtkRadioToolButton, ffi::GtkRadioToolButtonClass, RadioToolButtonClass>) @extends ToggleToolButton, ToolButton, ToolItem, Bin, Container, Widget, @implements Buildable, Actionable; match fn { get_type => || ffi::gtk_radio_tool_button_get_type(), } } impl RadioToolButton { pub fn new_from_widget<P: IsA<RadioToolButton>>(group: &P) -> RadioToolButton { skip_assert_initialized!(); unsafe { ToolItem::from_glib_none(ffi::gtk_radio_tool_button_new_from_widget(group.as_ref().to_glib_none().0)).unsafe_cast() } } } pub const NONE_RADIO_TOOL_BUTTON: Option<&RadioToolButton> = None; pub trait RadioToolButtonExt:'static { fn get_group(&self) -> Vec<RadioButton>; } impl<O: IsA<RadioToolButton>> RadioToolButtonExt for O { fn
(&self) -> Vec<RadioButton> { unsafe { FromGlibPtrContainer::from_glib_none(ffi::gtk_radio_tool_button_get_group(self.as_ref().to_glib_none().0)) } } } impl fmt::Display for RadioToolButton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "RadioToolButton") } }
get_group
identifier_name
day25.rs
#[macro_use] extern crate lazy_static; extern crate regex; extern crate aoc2016; use std::io::{BufReader, BufRead}; use std::fs::File; use aoc2016::assembunny::Cpu; fn main() { let file = File::open("input/day25.in").expect("Failed to open input"); let reader = BufReader::new(&file); let mut program = Vec::new(); for line in reader.lines() { let line = line.unwrap(); program.push(line.parse().unwrap()); } let mut cpu = Cpu::new(); let mut input = 0; loop { cpu.reset(); cpu.registers[0] = input; let output = cpu.run(&program); let mut good = output.len() % 2 == 0; for (i, &o) in output.iter().enumerate() { if i % 2 == 0 { good = good && o == 0; } else
} if good { break; } input += 1; } println!("1: {:?}", input); }
{ good = good && o == 1; }
conditional_block
day25.rs
#[macro_use] extern crate lazy_static; extern crate regex; extern crate aoc2016; use std::io::{BufReader, BufRead}; use std::fs::File; use aoc2016::assembunny::Cpu; fn main()
let mut good = output.len() % 2 == 0; for (i, &o) in output.iter().enumerate() { if i % 2 == 0 { good = good && o == 0; } else { good = good && o == 1; } } if good { break; } input += 1; } println!("1: {:?}", input); }
{ let file = File::open("input/day25.in").expect("Failed to open input"); let reader = BufReader::new(&file); let mut program = Vec::new(); for line in reader.lines() { let line = line.unwrap(); program.push(line.parse().unwrap()); } let mut cpu = Cpu::new(); let mut input = 0; loop { cpu.reset(); cpu.registers[0] = input; let output = cpu.run(&program);
identifier_body
day25.rs
#[macro_use] extern crate lazy_static; extern crate regex; extern crate aoc2016; use std::io::{BufReader, BufRead}; use std::fs::File; use aoc2016::assembunny::Cpu; fn
() { let file = File::open("input/day25.in").expect("Failed to open input"); let reader = BufReader::new(&file); let mut program = Vec::new(); for line in reader.lines() { let line = line.unwrap(); program.push(line.parse().unwrap()); } let mut cpu = Cpu::new(); let mut input = 0; loop { cpu.reset(); cpu.registers[0] = input; let output = cpu.run(&program); let mut good = output.len() % 2 == 0; for (i, &o) in output.iter().enumerate() { if i % 2 == 0 { good = good && o == 0; } else { good = good && o == 1; } } if good { break; } input += 1; } println!("1: {:?}", input); }
main
identifier_name
day25.rs
#[macro_use] extern crate lazy_static; extern crate regex; extern crate aoc2016; use std::io::{BufReader, BufRead}; use std::fs::File; use aoc2016::assembunny::Cpu; fn main() { let file = File::open("input/day25.in").expect("Failed to open input"); let reader = BufReader::new(&file); let mut program = Vec::new(); for line in reader.lines() { let line = line.unwrap(); program.push(line.parse().unwrap()); } let mut cpu = Cpu::new(); let mut input = 0;
loop { cpu.reset(); cpu.registers[0] = input; let output = cpu.run(&program); let mut good = output.len() % 2 == 0; for (i, &o) in output.iter().enumerate() { if i % 2 == 0 { good = good && o == 0; } else { good = good && o == 1; } } if good { break; } input += 1; } println!("1: {:?}", input); }
random_line_split
docker.rs
use std::io; use std::io::prelude::*; use std::mem; use std::process::{Output, Command, Stdio, ExitStatus}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; use wait_timeout::ChildExt; pub struct Container { id: String, } impl Container { pub fn new(cmd: &str, args: &[String], env: &[(String, String)], name: &str) -> io::Result<Container> { let out = try!(run(Command::new("docker") .arg("create") .arg("--cap-drop=ALL") .arg("--memory=128m") .arg("--net=none") .arg("--pids-limit=20") .arg("--security-opt=no-new-privileges") .arg("--interactive") .args(&env.iter().map(|&(ref k, ref v)| format!("--env={}={}", k, v)).collect::<Vec<_>>()) .arg(name) .arg(cmd) .stderr(Stdio::inherit()) .args(args))); let stdout = String::from_utf8_lossy(&out.stdout); Ok(Container { id: stdout.trim().to_string(), }) } pub fn run(&self, input: &[u8], timeout: Duration) -> io::Result<(ExitStatus, Vec<u8>, bool)> { let mut cmd = Command::new("docker"); cmd.arg("start") .arg("--attach") .arg("--interactive") .arg(&self.id) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); debug!("attaching with {:?}", cmd); let start = Instant::now(); let mut cmd = try!(cmd.spawn()); try!(cmd.stdin.take().unwrap().write_all(input)); debug!("input written, now waiting"); let mut stdout = cmd.stdout.take().unwrap(); let mut stderr = cmd.stderr.take().unwrap(); let sink = Arc::new(Mutex::new(Vec::new())); let sink2 = sink.clone(); let stdout = thread::spawn(move || append(&sink2, &mut stdout)); let sink2 = sink.clone(); let stderr = thread::spawn(move || append(&sink2, &mut stderr)); let (status, timeout) = match try!(cmd.wait_timeout(timeout)) { Some(status) => { debug!("finished before timeout"); // TODO: document this (unsafe { mem::transmute(status) }, false) } None => { debug!("timeout, going to kill"); try!(run(Command::new("docker").arg("kill").arg(&self.id))); (try!(cmd.wait()), true) } }; stdout.join().unwrap(); stderr.join().unwrap(); debug!("timing: {:?}", start.elapsed()); let mut lock = sink.lock().unwrap(); let output = mem::replace(&mut *lock, Vec::new()); debug!("status: {}", status); { let output_lossy = String::from_utf8_lossy(&output); if output_lossy.len() < 1024
else { let s = output_lossy.chars().take(1024).collect::<String>(); debug!("output (truncated): {}...", s); } } Ok((status, output, timeout)) } } fn append(into: &Mutex<Vec<u8>>, from: &mut Read) { let mut buf = [0; 1024]; while let Ok(amt) = from.read(&mut buf) { if amt == 0 { break } into.lock().unwrap().extend_from_slice(&buf[..amt]); } } impl Drop for Container { fn drop(&mut self) { run(Command::new("docker") .arg("rm") .arg("--force") .arg(&self.id)).unwrap(); } } fn run(cmd: &mut Command) -> io::Result<Output> { debug!("spawning: {:?}", cmd); let start = Instant::now(); let out = try!(cmd.output()); debug!("done in: {:?}", start.elapsed()); debug!("output: {:?}", out); if!out.status.success() { let msg = format!("process failed: {:?}\n{:?}", cmd, out); return Err(io::Error::new(io::ErrorKind::Other, msg)) } Ok(out) }
{ debug!("output: {}", output_lossy); }
conditional_block
docker.rs
use std::io; use std::io::prelude::*; use std::mem; use std::process::{Output, Command, Stdio, ExitStatus}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; use wait_timeout::ChildExt; pub struct
{ id: String, } impl Container { pub fn new(cmd: &str, args: &[String], env: &[(String, String)], name: &str) -> io::Result<Container> { let out = try!(run(Command::new("docker") .arg("create") .arg("--cap-drop=ALL") .arg("--memory=128m") .arg("--net=none") .arg("--pids-limit=20") .arg("--security-opt=no-new-privileges") .arg("--interactive") .args(&env.iter().map(|&(ref k, ref v)| format!("--env={}={}", k, v)).collect::<Vec<_>>()) .arg(name) .arg(cmd) .stderr(Stdio::inherit()) .args(args))); let stdout = String::from_utf8_lossy(&out.stdout); Ok(Container { id: stdout.trim().to_string(), }) } pub fn run(&self, input: &[u8], timeout: Duration) -> io::Result<(ExitStatus, Vec<u8>, bool)> { let mut cmd = Command::new("docker"); cmd.arg("start") .arg("--attach") .arg("--interactive") .arg(&self.id) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); debug!("attaching with {:?}", cmd); let start = Instant::now(); let mut cmd = try!(cmd.spawn()); try!(cmd.stdin.take().unwrap().write_all(input)); debug!("input written, now waiting"); let mut stdout = cmd.stdout.take().unwrap(); let mut stderr = cmd.stderr.take().unwrap(); let sink = Arc::new(Mutex::new(Vec::new())); let sink2 = sink.clone(); let stdout = thread::spawn(move || append(&sink2, &mut stdout)); let sink2 = sink.clone(); let stderr = thread::spawn(move || append(&sink2, &mut stderr)); let (status, timeout) = match try!(cmd.wait_timeout(timeout)) { Some(status) => { debug!("finished before timeout"); // TODO: document this (unsafe { mem::transmute(status) }, false) } None => { debug!("timeout, going to kill"); try!(run(Command::new("docker").arg("kill").arg(&self.id))); (try!(cmd.wait()), true) } }; stdout.join().unwrap(); stderr.join().unwrap(); debug!("timing: {:?}", start.elapsed()); let mut lock = sink.lock().unwrap(); let output = mem::replace(&mut *lock, Vec::new()); debug!("status: {}", status); { let output_lossy = String::from_utf8_lossy(&output); if output_lossy.len() < 1024 { debug!("output: {}", output_lossy); } else { let s = output_lossy.chars().take(1024).collect::<String>(); debug!("output (truncated): {}...", s); } } Ok((status, output, timeout)) } } fn append(into: &Mutex<Vec<u8>>, from: &mut Read) { let mut buf = [0; 1024]; while let Ok(amt) = from.read(&mut buf) { if amt == 0 { break } into.lock().unwrap().extend_from_slice(&buf[..amt]); } } impl Drop for Container { fn drop(&mut self) { run(Command::new("docker") .arg("rm") .arg("--force") .arg(&self.id)).unwrap(); } } fn run(cmd: &mut Command) -> io::Result<Output> { debug!("spawning: {:?}", cmd); let start = Instant::now(); let out = try!(cmd.output()); debug!("done in: {:?}", start.elapsed()); debug!("output: {:?}", out); if!out.status.success() { let msg = format!("process failed: {:?}\n{:?}", cmd, out); return Err(io::Error::new(io::ErrorKind::Other, msg)) } Ok(out) }
Container
identifier_name
docker.rs
use std::io; use std::io::prelude::*; use std::mem; use std::process::{Output, Command, Stdio, ExitStatus}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; use wait_timeout::ChildExt; pub struct Container { id: String, } impl Container { pub fn new(cmd: &str, args: &[String], env: &[(String, String)], name: &str) -> io::Result<Container> { let out = try!(run(Command::new("docker") .arg("create") .arg("--cap-drop=ALL") .arg("--memory=128m") .arg("--net=none") .arg("--pids-limit=20") .arg("--security-opt=no-new-privileges") .arg("--interactive") .args(&env.iter().map(|&(ref k, ref v)| format!("--env={}={}", k, v)).collect::<Vec<_>>()) .arg(name) .arg(cmd) .stderr(Stdio::inherit()) .args(args))); let stdout = String::from_utf8_lossy(&out.stdout); Ok(Container { id: stdout.trim().to_string(), }) } pub fn run(&self, input: &[u8], timeout: Duration) -> io::Result<(ExitStatus, Vec<u8>, bool)> { let mut cmd = Command::new("docker"); cmd.arg("start") .arg("--attach") .arg("--interactive") .arg(&self.id) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); debug!("attaching with {:?}", cmd); let start = Instant::now(); let mut cmd = try!(cmd.spawn()); try!(cmd.stdin.take().unwrap().write_all(input)); debug!("input written, now waiting"); let mut stdout = cmd.stdout.take().unwrap(); let mut stderr = cmd.stderr.take().unwrap(); let sink = Arc::new(Mutex::new(Vec::new())); let sink2 = sink.clone(); let stdout = thread::spawn(move || append(&sink2, &mut stdout)); let sink2 = sink.clone(); let stderr = thread::spawn(move || append(&sink2, &mut stderr)); let (status, timeout) = match try!(cmd.wait_timeout(timeout)) { Some(status) => { debug!("finished before timeout"); // TODO: document this (unsafe { mem::transmute(status) }, false) } None => { debug!("timeout, going to kill"); try!(run(Command::new("docker").arg("kill").arg(&self.id))); (try!(cmd.wait()), true) } }; stdout.join().unwrap(); stderr.join().unwrap(); debug!("timing: {:?}", start.elapsed()); let mut lock = sink.lock().unwrap(); let output = mem::replace(&mut *lock, Vec::new()); debug!("status: {}", status); { let output_lossy = String::from_utf8_lossy(&output); if output_lossy.len() < 1024 { debug!("output: {}", output_lossy); } else { let s = output_lossy.chars().take(1024).collect::<String>(); debug!("output (truncated): {}...", s); } } Ok((status, output, timeout)) } } fn append(into: &Mutex<Vec<u8>>, from: &mut Read) { let mut buf = [0; 1024]; while let Ok(amt) = from.read(&mut buf) {
} into.lock().unwrap().extend_from_slice(&buf[..amt]); } } impl Drop for Container { fn drop(&mut self) { run(Command::new("docker") .arg("rm") .arg("--force") .arg(&self.id)).unwrap(); } } fn run(cmd: &mut Command) -> io::Result<Output> { debug!("spawning: {:?}", cmd); let start = Instant::now(); let out = try!(cmd.output()); debug!("done in: {:?}", start.elapsed()); debug!("output: {:?}", out); if!out.status.success() { let msg = format!("process failed: {:?}\n{:?}", cmd, out); return Err(io::Error::new(io::ErrorKind::Other, msg)) } Ok(out) }
if amt == 0 { break
random_line_split
docker.rs
use std::io; use std::io::prelude::*; use std::mem; use std::process::{Output, Command, Stdio, ExitStatus}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; use wait_timeout::ChildExt; pub struct Container { id: String, } impl Container { pub fn new(cmd: &str, args: &[String], env: &[(String, String)], name: &str) -> io::Result<Container> { let out = try!(run(Command::new("docker") .arg("create") .arg("--cap-drop=ALL") .arg("--memory=128m") .arg("--net=none") .arg("--pids-limit=20") .arg("--security-opt=no-new-privileges") .arg("--interactive") .args(&env.iter().map(|&(ref k, ref v)| format!("--env={}={}", k, v)).collect::<Vec<_>>()) .arg(name) .arg(cmd) .stderr(Stdio::inherit()) .args(args))); let stdout = String::from_utf8_lossy(&out.stdout); Ok(Container { id: stdout.trim().to_string(), }) } pub fn run(&self, input: &[u8], timeout: Duration) -> io::Result<(ExitStatus, Vec<u8>, bool)>
let sink2 = sink.clone(); let stderr = thread::spawn(move || append(&sink2, &mut stderr)); let (status, timeout) = match try!(cmd.wait_timeout(timeout)) { Some(status) => { debug!("finished before timeout"); // TODO: document this (unsafe { mem::transmute(status) }, false) } None => { debug!("timeout, going to kill"); try!(run(Command::new("docker").arg("kill").arg(&self.id))); (try!(cmd.wait()), true) } }; stdout.join().unwrap(); stderr.join().unwrap(); debug!("timing: {:?}", start.elapsed()); let mut lock = sink.lock().unwrap(); let output = mem::replace(&mut *lock, Vec::new()); debug!("status: {}", status); { let output_lossy = String::from_utf8_lossy(&output); if output_lossy.len() < 1024 { debug!("output: {}", output_lossy); } else { let s = output_lossy.chars().take(1024).collect::<String>(); debug!("output (truncated): {}...", s); } } Ok((status, output, timeout)) } } fn append(into: &Mutex<Vec<u8>>, from: &mut Read) { let mut buf = [0; 1024]; while let Ok(amt) = from.read(&mut buf) { if amt == 0 { break } into.lock().unwrap().extend_from_slice(&buf[..amt]); } } impl Drop for Container { fn drop(&mut self) { run(Command::new("docker") .arg("rm") .arg("--force") .arg(&self.id)).unwrap(); } } fn run(cmd: &mut Command) -> io::Result<Output> { debug!("spawning: {:?}", cmd); let start = Instant::now(); let out = try!(cmd.output()); debug!("done in: {:?}", start.elapsed()); debug!("output: {:?}", out); if!out.status.success() { let msg = format!("process failed: {:?}\n{:?}", cmd, out); return Err(io::Error::new(io::ErrorKind::Other, msg)) } Ok(out) }
{ let mut cmd = Command::new("docker"); cmd.arg("start") .arg("--attach") .arg("--interactive") .arg(&self.id) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); debug!("attaching with {:?}", cmd); let start = Instant::now(); let mut cmd = try!(cmd.spawn()); try!(cmd.stdin.take().unwrap().write_all(input)); debug!("input written, now waiting"); let mut stdout = cmd.stdout.take().unwrap(); let mut stderr = cmd.stderr.take().unwrap(); let sink = Arc::new(Mutex::new(Vec::new())); let sink2 = sink.clone(); let stdout = thread::spawn(move || append(&sink2, &mut stdout));
identifier_body
main.rs
/* Aurélien DESBRIÈRES aurelien(at)hackers(dot)camp License GNU GPL latest */ // Rust experimentations // Generics bounds in Rust // A trait which implements the print marker: `{:?}`. use std::fmt::Debug; trait HasArea { fn area(&self) -> f64; } impl HasArea for Rectangle { fn area(&self) -> f64 { self.length * self.height } } #[derive(Debug)] struct Rectangle { length: f64, height: f64 } #[allow(dead_code)] struct Triangle { length: f64, height: f64 } // The generic `T` must implement `Debug`. Regardless // of type, this will work properly. fn print_debug<T: Debug>(t: &T) { println!("{:?}", t); } // `T` must implement `HasArea`. Any function which meets // the bound can access `HasArea`'s function `area`. fn area<T: HasArea>(t: &T) -> f64 { t.area() } fn main() {
let rectangle = Rectangle { length: 3.0, height: 4.0 }; let _triangle = Triangle { length: 3.0, height: 4.0 }; print_debug(&rectangle); println!("Area: {}", area(&rectangle)); //print_debug(&_triangle); //println!("Area: {}", area(&_triangle)); // ^ TODO: Try uncommenting these. // | Error: Does not implement either `Debug` or `HasArea`. }
identifier_body
main.rs
/* Aurélien DESBRIÈRES aurelien(at)hackers(dot)camp License GNU GPL latest */ // Rust experimentations // Generics bounds in Rust // A trait which implements the print marker: `{:?}`. use std::fmt::Debug; trait HasArea { fn area(&self) -> f64; } impl HasArea for Rectangle { fn ar
self) -> f64 { self.length * self.height } } #[derive(Debug)] struct Rectangle { length: f64, height: f64 } #[allow(dead_code)] struct Triangle { length: f64, height: f64 } // The generic `T` must implement `Debug`. Regardless // of type, this will work properly. fn print_debug<T: Debug>(t: &T) { println!("{:?}", t); } // `T` must implement `HasArea`. Any function which meets // the bound can access `HasArea`'s function `area`. fn area<T: HasArea>(t: &T) -> f64 { t.area() } fn main() { let rectangle = Rectangle { length: 3.0, height: 4.0 }; let _triangle = Triangle { length: 3.0, height: 4.0 }; print_debug(&rectangle); println!("Area: {}", area(&rectangle)); //print_debug(&_triangle); //println!("Area: {}", area(&_triangle)); // ^ TODO: Try uncommenting these. // | Error: Does not implement either `Debug` or `HasArea`. }
ea(&
identifier_name
main.rs
/* Aurélien DESBRIÈRES aurelien(at)hackers(dot)camp License GNU GPL latest */ // Rust experimentations // Generics bounds in Rust // A trait which implements the print marker: `{:?}`. use std::fmt::Debug; trait HasArea { fn area(&self) -> f64;
} #[derive(Debug)] struct Rectangle { length: f64, height: f64 } #[allow(dead_code)] struct Triangle { length: f64, height: f64 } // The generic `T` must implement `Debug`. Regardless // of type, this will work properly. fn print_debug<T: Debug>(t: &T) { println!("{:?}", t); } // `T` must implement `HasArea`. Any function which meets // the bound can access `HasArea`'s function `area`. fn area<T: HasArea>(t: &T) -> f64 { t.area() } fn main() { let rectangle = Rectangle { length: 3.0, height: 4.0 }; let _triangle = Triangle { length: 3.0, height: 4.0 }; print_debug(&rectangle); println!("Area: {}", area(&rectangle)); //print_debug(&_triangle); //println!("Area: {}", area(&_triangle)); // ^ TODO: Try uncommenting these. // | Error: Does not implement either `Debug` or `HasArea`. }
} impl HasArea for Rectangle { fn area(&self) -> f64 { self.length * self.height }
random_line_split
gpu_types.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use api::{DevicePoint, LayoutToWorldTransform, WorldToLayoutTransform}; use gpu_cache::{GpuCacheAddress, GpuDataRequest}; use prim_store::{VECS_PER_SEGMENT, EdgeAaSegmentMask}; use render_task::RenderTaskAddress; use renderer::MAX_VERTEX_TEXTURE_WIDTH; // Contains type that must exactly match the same structures declared in GLSL. const INT_BITS: usize = 31; //TODO: convert to unsigned const CLIP_CHAIN_RECT_BITS: usize = 22; const SEGMENT_BITS: usize = INT_BITS - CLIP_CHAIN_RECT_BITS; // The guard ensures (at compile time) that the designated number of bits cover // the maximum supported segment count for the texture width. const _SEGMENT_GUARD: usize = (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH; const EDGE_FLAG_BITS: usize = 4; const BRUSH_FLAG_BITS: usize = 4; const CLIP_SCROLL_INDEX_BITS: usize = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS; #[derive(Copy, Clone, Debug)] #[repr(C)] pub struct ZBufferId(i32); pub struct ZBufferIdGenerator { next: i32, } impl ZBufferIdGenerator { pub fn new() -> Self { ZBufferIdGenerator { next: 0 } } pub fn next(&mut self) -> ZBufferId { let id = ZBufferId(self.next); self.next += 1; id } } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum RasterizationSpace { Local = 0, Screen = 1, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum BoxShadowStretchMode { Stretch = 0, Simple = 1, } #[repr(i32)] #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum BlurDirection { Horizontal = 0, Vertical, } #[derive(Debug)] #[repr(C)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BlurInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub blur_direction: BlurDirection, } /// A clipping primitive drawn into the clipping mask. /// Could be an image or a rectangle, which defines the /// way `address` is treated. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskInstance { pub render_task_address: RenderTaskAddress, pub scroll_node_data_index: ClipScrollNodeIndex, pub segment: i32, pub clip_data_address: GpuCacheAddress, pub resource_address: GpuCacheAddress, } /// A border corner dot or dash drawn into the clipping mask. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskBorderCornerDotDash { pub clip_mask_instance: ClipMaskInstance, pub dot_dash_data: [f32; 8], } // 32 bytes per instance should be enough for anyone! 
#[derive(Debug, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PrimitiveInstance { data: [i32; 8], } pub struct SimplePrimitiveInstance { pub specific_prim_address: GpuCacheAddress, pub task_address: RenderTaskAddress, pub clip_task_address: RenderTaskAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub z: ZBufferId, } impl SimplePrimitiveInstance { pub fn new( specific_prim_address: GpuCacheAddress, task_address: RenderTaskAddress, clip_task_address: RenderTaskAddress, clip_chain_rect_index: ClipChainRectIndex, scroll_id: ClipScrollNodeIndex, z: ZBufferId, ) -> Self { SimplePrimitiveInstance { specific_prim_address, task_address, clip_task_address, clip_chain_rect_index, scroll_id, z, } } pub fn build(&self, data0: i32, data1: i32, data2: i32) -> PrimitiveInstance { PrimitiveInstance { data: [ self.specific_prim_address.as_int(), self.task_address.0 as i32 | (self.clip_task_address.0 as i32) << 16, self.clip_chain_rect_index.0 as i32, self.scroll_id.0 as i32, self.z.0, data0, data1, data2, ], } } } pub struct CompositePrimitiveInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub backdrop_task_address: RenderTaskAddress, pub data0: i32, pub data1: i32, pub z: ZBufferId, pub data2: i32, pub data3: i32, } impl CompositePrimitiveInstance { pub fn new( task_address: RenderTaskAddress, src_task_address: RenderTaskAddress, backdrop_task_address: RenderTaskAddress, data0: i32, data1: i32, z: ZBufferId, data2: i32, data3: i32, ) -> Self { CompositePrimitiveInstance { task_address, src_task_address, backdrop_task_address, data0, data1, z, data2, data3, } } } impl From<CompositePrimitiveInstance> for PrimitiveInstance { fn from(instance: CompositePrimitiveInstance) -> Self { PrimitiveInstance { data: [ instance.task_address.0 as i32, instance.src_task_address.0 as i32, instance.backdrop_task_address.0 as i32, instance.z.0, instance.data0, instance.data1, instance.data2, instance.data3, ], } } } bitflags! { /// Flags that define how the common brush shader /// code should process this instance. pub struct BrushFlags: u8 { /// Apply perspective interpolation to UVs const PERSPECTIVE_INTERPOLATION = 0x1; /// Do interpolation relative to segment rect, /// rather than primitive rect. const SEGMENT_RELATIVE = 0x2; /// Repeat UVs horizontally. const SEGMENT_REPEAT_X = 0x4; /// Repeat UVs vertically. const SEGMENT_REPEAT_Y = 0x8; } } // TODO(gw): While we are converting things over, we // need to have the instance be the same // size as an old PrimitiveInstance. In the // future, we can compress this vertex // format a lot - e.g. z, render task // addresses etc can reasonably become // a u16 type. 
#[repr(C)] pub struct BrushInstance { pub picture_address: RenderTaskAddress, pub prim_address: GpuCacheAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub clip_task_address: RenderTaskAddress, pub z: ZBufferId, pub segment_index: i32, pub edge_flags: EdgeAaSegmentMask, pub brush_flags: BrushFlags, pub user_data: [i32; 3], } impl From<BrushInstance> for PrimitiveInstance { fn from(instance: BrushInstance) -> Self { debug_assert_eq!(0, instance.clip_chain_rect_index.0 >> CLIP_CHAIN_RECT_BITS); debug_assert_eq!(0, instance.scroll_id.0 >> CLIP_SCROLL_INDEX_BITS); debug_assert_eq!(0, instance.segment_index >> SEGMENT_BITS); PrimitiveInstance { data: [ instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16, instance.prim_address.as_int(), instance.clip_chain_rect_index.0 as i32 | (instance.segment_index << CLIP_CHAIN_RECT_BITS), instance.z.0, instance.scroll_id.0 as i32 | ((instance.edge_flags.bits() as i32) << CLIP_SCROLL_INDEX_BITS) | ((instance.brush_flags.bits() as i32) << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)), instance.user_data[0], instance.user_data[1], instance.user_data[2], ] } } } #[derive(Copy, Debug, Clone, PartialEq)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeIndex(pub u32); #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeData { pub transform: LayoutToWorldTransform, pub inv_transform: WorldToLayoutTransform, pub transform_kind: f32, pub padding: [f32; 3], } impl ClipScrollNodeData { pub fn invalid() -> Self { ClipScrollNodeData { transform: LayoutToWorldTransform::identity(), inv_transform: WorldToLayoutTransform::identity(), transform_kind: 0.0, padding: [0.0; 3], } } } #[derive(Copy, Debug, Clone, PartialEq)] #[repr(C)] pub struct ClipChainRectIndex(pub usize); // Texture cache resources can be either a simple rect, or define // a polygon within a rect by specifying a UV coordinate for each // corner. This is useful for rendering screen-space rasterized // off-screen surfaces. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum UvRectKind { // The 2d bounds of the texture cache entry define the // valid UV space for this texture cache entry. Rect, // The four vertices below define a quad within // the texture cache entry rect. The shader can // use a bilerp() to correctly interpolate a // UV coord in the vertex shader. Quad { top_left: DevicePoint, top_right: DevicePoint, bottom_left: DevicePoint, bottom_right: DevicePoint, }, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct ImageSource { pub p0: DevicePoint, pub p1: DevicePoint,
pub uv_rect_kind: UvRectKind, } impl ImageSource { pub fn write_gpu_blocks(&self, request: &mut GpuDataRequest) { request.push([ self.p0.x, self.p0.y, self.p1.x, self.p1.y, ]); request.push([ self.texture_layer, self.user_data[0], self.user_data[1], self.user_data[2], ]); // If this is a polygon uv kind, then upload the four vertices. if let UvRectKind::Quad { top_left, top_right, bottom_left, bottom_right } = self.uv_rect_kind { request.push([ top_left.x, top_left.y, top_right.x, top_right.y, ]); request.push([ bottom_left.x, bottom_left.y, bottom_right.x, bottom_right.y, ]); } } }
pub texture_layer: f32, pub user_data: [f32; 3],
random_line_split
gpu_types.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use api::{DevicePoint, LayoutToWorldTransform, WorldToLayoutTransform}; use gpu_cache::{GpuCacheAddress, GpuDataRequest}; use prim_store::{VECS_PER_SEGMENT, EdgeAaSegmentMask}; use render_task::RenderTaskAddress; use renderer::MAX_VERTEX_TEXTURE_WIDTH; // Contains type that must exactly match the same structures declared in GLSL. const INT_BITS: usize = 31; //TODO: convert to unsigned const CLIP_CHAIN_RECT_BITS: usize = 22; const SEGMENT_BITS: usize = INT_BITS - CLIP_CHAIN_RECT_BITS; // The guard ensures (at compile time) that the designated number of bits cover // the maximum supported segment count for the texture width. const _SEGMENT_GUARD: usize = (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH; const EDGE_FLAG_BITS: usize = 4; const BRUSH_FLAG_BITS: usize = 4; const CLIP_SCROLL_INDEX_BITS: usize = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS; #[derive(Copy, Clone, Debug)] #[repr(C)] pub struct ZBufferId(i32); pub struct ZBufferIdGenerator { next: i32, } impl ZBufferIdGenerator { pub fn new() -> Self { ZBufferIdGenerator { next: 0 } } pub fn next(&mut self) -> ZBufferId { let id = ZBufferId(self.next); self.next += 1; id } } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum RasterizationSpace { Local = 0, Screen = 1, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum BoxShadowStretchMode { Stretch = 0, Simple = 1, } #[repr(i32)] #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum BlurDirection { Horizontal = 0, Vertical, } #[derive(Debug)] #[repr(C)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BlurInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub blur_direction: BlurDirection, } /// A clipping primitive drawn into the clipping mask. /// Could be an image or a rectangle, which defines the /// way `address` is treated. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskInstance { pub render_task_address: RenderTaskAddress, pub scroll_node_data_index: ClipScrollNodeIndex, pub segment: i32, pub clip_data_address: GpuCacheAddress, pub resource_address: GpuCacheAddress, } /// A border corner dot or dash drawn into the clipping mask. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskBorderCornerDotDash { pub clip_mask_instance: ClipMaskInstance, pub dot_dash_data: [f32; 8], } // 32 bytes per instance should be enough for anyone! 
#[derive(Debug, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PrimitiveInstance { data: [i32; 8], } pub struct SimplePrimitiveInstance { pub specific_prim_address: GpuCacheAddress, pub task_address: RenderTaskAddress, pub clip_task_address: RenderTaskAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub z: ZBufferId, } impl SimplePrimitiveInstance { pub fn new( specific_prim_address: GpuCacheAddress, task_address: RenderTaskAddress, clip_task_address: RenderTaskAddress, clip_chain_rect_index: ClipChainRectIndex, scroll_id: ClipScrollNodeIndex, z: ZBufferId, ) -> Self { SimplePrimitiveInstance { specific_prim_address, task_address, clip_task_address, clip_chain_rect_index, scroll_id, z, } } pub fn build(&self, data0: i32, data1: i32, data2: i32) -> PrimitiveInstance { PrimitiveInstance { data: [ self.specific_prim_address.as_int(), self.task_address.0 as i32 | (self.clip_task_address.0 as i32) << 16, self.clip_chain_rect_index.0 as i32, self.scroll_id.0 as i32, self.z.0, data0, data1, data2, ], } } } pub struct CompositePrimitiveInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub backdrop_task_address: RenderTaskAddress, pub data0: i32, pub data1: i32, pub z: ZBufferId, pub data2: i32, pub data3: i32, } impl CompositePrimitiveInstance { pub fn new( task_address: RenderTaskAddress, src_task_address: RenderTaskAddress, backdrop_task_address: RenderTaskAddress, data0: i32, data1: i32, z: ZBufferId, data2: i32, data3: i32, ) -> Self { CompositePrimitiveInstance { task_address, src_task_address, backdrop_task_address, data0, data1, z, data2, data3, } } } impl From<CompositePrimitiveInstance> for PrimitiveInstance { fn from(instance: CompositePrimitiveInstance) -> Self { PrimitiveInstance { data: [ instance.task_address.0 as i32, instance.src_task_address.0 as i32, instance.backdrop_task_address.0 as i32, instance.z.0, instance.data0, instance.data1, instance.data2, instance.data3, ], } } } bitflags! { /// Flags that define how the common brush shader /// code should process this instance. pub struct BrushFlags: u8 { /// Apply perspective interpolation to UVs const PERSPECTIVE_INTERPOLATION = 0x1; /// Do interpolation relative to segment rect, /// rather than primitive rect. const SEGMENT_RELATIVE = 0x2; /// Repeat UVs horizontally. const SEGMENT_REPEAT_X = 0x4; /// Repeat UVs vertically. const SEGMENT_REPEAT_Y = 0x8; } } // TODO(gw): While we are converting things over, we // need to have the instance be the same // size as an old PrimitiveInstance. In the // future, we can compress this vertex // format a lot - e.g. z, render task // addresses etc can reasonably become // a u16 type. 
#[repr(C)] pub struct BrushInstance { pub picture_address: RenderTaskAddress, pub prim_address: GpuCacheAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub clip_task_address: RenderTaskAddress, pub z: ZBufferId, pub segment_index: i32, pub edge_flags: EdgeAaSegmentMask, pub brush_flags: BrushFlags, pub user_data: [i32; 3], } impl From<BrushInstance> for PrimitiveInstance { fn from(instance: BrushInstance) -> Self { debug_assert_eq!(0, instance.clip_chain_rect_index.0 >> CLIP_CHAIN_RECT_BITS); debug_assert_eq!(0, instance.scroll_id.0 >> CLIP_SCROLL_INDEX_BITS); debug_assert_eq!(0, instance.segment_index >> SEGMENT_BITS); PrimitiveInstance { data: [ instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16, instance.prim_address.as_int(), instance.clip_chain_rect_index.0 as i32 | (instance.segment_index << CLIP_CHAIN_RECT_BITS), instance.z.0, instance.scroll_id.0 as i32 | ((instance.edge_flags.bits() as i32) << CLIP_SCROLL_INDEX_BITS) | ((instance.brush_flags.bits() as i32) << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)), instance.user_data[0], instance.user_data[1], instance.user_data[2], ] } } } #[derive(Copy, Debug, Clone, PartialEq)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeIndex(pub u32); #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeData { pub transform: LayoutToWorldTransform, pub inv_transform: WorldToLayoutTransform, pub transform_kind: f32, pub padding: [f32; 3], } impl ClipScrollNodeData { pub fn invalid() -> Self { ClipScrollNodeData { transform: LayoutToWorldTransform::identity(), inv_transform: WorldToLayoutTransform::identity(), transform_kind: 0.0, padding: [0.0; 3], } } } #[derive(Copy, Debug, Clone, PartialEq)] #[repr(C)] pub struct ClipChainRectIndex(pub usize); // Texture cache resources can be either a simple rect, or define // a polygon within a rect by specifying a UV coordinate for each // corner. This is useful for rendering screen-space rasterized // off-screen surfaces. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum UvRectKind { // The 2d bounds of the texture cache entry define the // valid UV space for this texture cache entry. Rect, // The four vertices below define a quad within // the texture cache entry rect. The shader can // use a bilerp() to correctly interpolate a // UV coord in the vertex shader. Quad { top_left: DevicePoint, top_right: DevicePoint, bottom_left: DevicePoint, bottom_right: DevicePoint, }, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct ImageSource { pub p0: DevicePoint, pub p1: DevicePoint, pub texture_layer: f32, pub user_data: [f32; 3], pub uv_rect_kind: UvRectKind, } impl ImageSource { pub fn write_gpu_blocks(&self, request: &mut GpuDataRequest) { request.push([ self.p0.x, self.p0.y, self.p1.x, self.p1.y, ]); request.push([ self.texture_layer, self.user_data[0], self.user_data[1], self.user_data[2], ]); // If this is a polygon uv kind, then upload the four vertices. if let UvRectKind::Quad { top_left, top_right, bottom_left, bottom_right } = self.uv_rect_kind
} }
{ request.push([ top_left.x, top_left.y, top_right.x, top_right.y, ]); request.push([ bottom_left.x, bottom_left.y, bottom_right.x, bottom_right.y, ]); }
conditional_block
gpu_types.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use api::{DevicePoint, LayoutToWorldTransform, WorldToLayoutTransform}; use gpu_cache::{GpuCacheAddress, GpuDataRequest}; use prim_store::{VECS_PER_SEGMENT, EdgeAaSegmentMask}; use render_task::RenderTaskAddress; use renderer::MAX_VERTEX_TEXTURE_WIDTH; // Contains type that must exactly match the same structures declared in GLSL. const INT_BITS: usize = 31; //TODO: convert to unsigned const CLIP_CHAIN_RECT_BITS: usize = 22; const SEGMENT_BITS: usize = INT_BITS - CLIP_CHAIN_RECT_BITS; // The guard ensures (at compile time) that the designated number of bits cover // the maximum supported segment count for the texture width. const _SEGMENT_GUARD: usize = (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH; const EDGE_FLAG_BITS: usize = 4; const BRUSH_FLAG_BITS: usize = 4; const CLIP_SCROLL_INDEX_BITS: usize = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS; #[derive(Copy, Clone, Debug)] #[repr(C)] pub struct ZBufferId(i32); pub struct
{ next: i32, } impl ZBufferIdGenerator { pub fn new() -> Self { ZBufferIdGenerator { next: 0 } } pub fn next(&mut self) -> ZBufferId { let id = ZBufferId(self.next); self.next += 1; id } } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum RasterizationSpace { Local = 0, Screen = 1, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub enum BoxShadowStretchMode { Stretch = 0, Simple = 1, } #[repr(i32)] #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum BlurDirection { Horizontal = 0, Vertical, } #[derive(Debug)] #[repr(C)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BlurInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub blur_direction: BlurDirection, } /// A clipping primitive drawn into the clipping mask. /// Could be an image or a rectangle, which defines the /// way `address` is treated. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskInstance { pub render_task_address: RenderTaskAddress, pub scroll_node_data_index: ClipScrollNodeIndex, pub segment: i32, pub clip_data_address: GpuCacheAddress, pub resource_address: GpuCacheAddress, } /// A border corner dot or dash drawn into the clipping mask. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipMaskBorderCornerDotDash { pub clip_mask_instance: ClipMaskInstance, pub dot_dash_data: [f32; 8], } // 32 bytes per instance should be enough for anyone! 
#[derive(Debug, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct PrimitiveInstance { data: [i32; 8], } pub struct SimplePrimitiveInstance { pub specific_prim_address: GpuCacheAddress, pub task_address: RenderTaskAddress, pub clip_task_address: RenderTaskAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub z: ZBufferId, } impl SimplePrimitiveInstance { pub fn new( specific_prim_address: GpuCacheAddress, task_address: RenderTaskAddress, clip_task_address: RenderTaskAddress, clip_chain_rect_index: ClipChainRectIndex, scroll_id: ClipScrollNodeIndex, z: ZBufferId, ) -> Self { SimplePrimitiveInstance { specific_prim_address, task_address, clip_task_address, clip_chain_rect_index, scroll_id, z, } } pub fn build(&self, data0: i32, data1: i32, data2: i32) -> PrimitiveInstance { PrimitiveInstance { data: [ self.specific_prim_address.as_int(), self.task_address.0 as i32 | (self.clip_task_address.0 as i32) << 16, self.clip_chain_rect_index.0 as i32, self.scroll_id.0 as i32, self.z.0, data0, data1, data2, ], } } } pub struct CompositePrimitiveInstance { pub task_address: RenderTaskAddress, pub src_task_address: RenderTaskAddress, pub backdrop_task_address: RenderTaskAddress, pub data0: i32, pub data1: i32, pub z: ZBufferId, pub data2: i32, pub data3: i32, } impl CompositePrimitiveInstance { pub fn new( task_address: RenderTaskAddress, src_task_address: RenderTaskAddress, backdrop_task_address: RenderTaskAddress, data0: i32, data1: i32, z: ZBufferId, data2: i32, data3: i32, ) -> Self { CompositePrimitiveInstance { task_address, src_task_address, backdrop_task_address, data0, data1, z, data2, data3, } } } impl From<CompositePrimitiveInstance> for PrimitiveInstance { fn from(instance: CompositePrimitiveInstance) -> Self { PrimitiveInstance { data: [ instance.task_address.0 as i32, instance.src_task_address.0 as i32, instance.backdrop_task_address.0 as i32, instance.z.0, instance.data0, instance.data1, instance.data2, instance.data3, ], } } } bitflags! { /// Flags that define how the common brush shader /// code should process this instance. pub struct BrushFlags: u8 { /// Apply perspective interpolation to UVs const PERSPECTIVE_INTERPOLATION = 0x1; /// Do interpolation relative to segment rect, /// rather than primitive rect. const SEGMENT_RELATIVE = 0x2; /// Repeat UVs horizontally. const SEGMENT_REPEAT_X = 0x4; /// Repeat UVs vertically. const SEGMENT_REPEAT_Y = 0x8; } } // TODO(gw): While we are converting things over, we // need to have the instance be the same // size as an old PrimitiveInstance. In the // future, we can compress this vertex // format a lot - e.g. z, render task // addresses etc can reasonably become // a u16 type. 
#[repr(C)] pub struct BrushInstance { pub picture_address: RenderTaskAddress, pub prim_address: GpuCacheAddress, pub clip_chain_rect_index: ClipChainRectIndex, pub scroll_id: ClipScrollNodeIndex, pub clip_task_address: RenderTaskAddress, pub z: ZBufferId, pub segment_index: i32, pub edge_flags: EdgeAaSegmentMask, pub brush_flags: BrushFlags, pub user_data: [i32; 3], } impl From<BrushInstance> for PrimitiveInstance { fn from(instance: BrushInstance) -> Self { debug_assert_eq!(0, instance.clip_chain_rect_index.0 >> CLIP_CHAIN_RECT_BITS); debug_assert_eq!(0, instance.scroll_id.0 >> CLIP_SCROLL_INDEX_BITS); debug_assert_eq!(0, instance.segment_index >> SEGMENT_BITS); PrimitiveInstance { data: [ instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16, instance.prim_address.as_int(), instance.clip_chain_rect_index.0 as i32 | (instance.segment_index << CLIP_CHAIN_RECT_BITS), instance.z.0, instance.scroll_id.0 as i32 | ((instance.edge_flags.bits() as i32) << CLIP_SCROLL_INDEX_BITS) | ((instance.brush_flags.bits() as i32) << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)), instance.user_data[0], instance.user_data[1], instance.user_data[2], ] } } } #[derive(Copy, Debug, Clone, PartialEq)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeIndex(pub u32); #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] #[repr(C)] pub struct ClipScrollNodeData { pub transform: LayoutToWorldTransform, pub inv_transform: WorldToLayoutTransform, pub transform_kind: f32, pub padding: [f32; 3], } impl ClipScrollNodeData { pub fn invalid() -> Self { ClipScrollNodeData { transform: LayoutToWorldTransform::identity(), inv_transform: WorldToLayoutTransform::identity(), transform_kind: 0.0, padding: [0.0; 3], } } } #[derive(Copy, Debug, Clone, PartialEq)] #[repr(C)] pub struct ClipChainRectIndex(pub usize); // Texture cache resources can be either a simple rect, or define // a polygon within a rect by specifying a UV coordinate for each // corner. This is useful for rendering screen-space rasterized // off-screen surfaces. #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum UvRectKind { // The 2d bounds of the texture cache entry define the // valid UV space for this texture cache entry. Rect, // The four vertices below define a quad within // the texture cache entry rect. The shader can // use a bilerp() to correctly interpolate a // UV coord in the vertex shader. Quad { top_left: DevicePoint, top_right: DevicePoint, bottom_left: DevicePoint, bottom_right: DevicePoint, }, } #[derive(Debug, Copy, Clone)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct ImageSource { pub p0: DevicePoint, pub p1: DevicePoint, pub texture_layer: f32, pub user_data: [f32; 3], pub uv_rect_kind: UvRectKind, } impl ImageSource { pub fn write_gpu_blocks(&self, request: &mut GpuDataRequest) { request.push([ self.p0.x, self.p0.y, self.p1.x, self.p1.y, ]); request.push([ self.texture_layer, self.user_data[0], self.user_data[1], self.user_data[2], ]); // If this is a polygon uv kind, then upload the four vertices. 
if let UvRectKind::Quad { top_left, top_right, bottom_left, bottom_right } = self.uv_rect_kind { request.push([ top_left.x, top_left.y, top_right.x, top_right.y, ]); request.push([ bottom_left.x, bottom_left.y, bottom_right.x, bottom_right.y, ]); } } }
ZBufferIdGenerator
identifier_name
main.rs
extern crate config; extern crate rand; extern crate clap; extern crate nix; extern crate sys_info; extern crate anyhow; extern crate flate2; extern crate tar; #[macro_use] extern crate lazy_static; #[cfg(target_os = "linux")] extern crate gtk; #[cfg(target_os = "linux")] extern crate libappindicator; #[cfg(target_os = "linux")] extern crate libc; extern { } use std::path::{PathBuf, Path}; use clap::ArgMatches; use std::io::{self, Read, Write}; use std::fs::File; use flate2::Compression; use flate2::write::GzEncoder; mod aws; mod clip; mod conf; mod notify; mod capture; mod util; mod cli; mod ui; use conf::DropConfig; fn main() { let mut cli_app = cli::create_drop_cli_app(); let matches = cli_app.clone().get_matches(); let config = conf::load_config(&matches); if matches.is_present("file") { handle_file(config, &matches); } else if matches.is_present("screenshot") || matches.is_present("video") { handle_screen_capture(config, &matches); } else { let result = cli_app.print_help(); if result.is_err() { println!("WARNING: Error occurred attempting to print help text") } } } fn handle_screen_capture(config: DropConfig, matches: &ArgMatches) { let out_file = if matches.is_present("video") { capture_screencast(&config) } else { capture_screenshot(&config) }; let url = handle_upload_and_produce_url(&config, &out_file.as_path(), None); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_screenshot_notification(&out_file.as_path(), &config); } println!("{}", url); } fn capture_screenshot(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some("png".to_string())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screenshot(out_file.as_path(), config); out_file } fn capture_screencast(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some(config.video_format.clone())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screencast(out_file.as_path(), config); out_file } fn handle_file(config: DropConfig, matches: &ArgMatches) { let file = matches.value_of("file").unwrap(); if file == "-" { handle_stdin(config); } else { let path = Path::new(file); if path.is_dir()
else { handle_file_upload(config, &path); } } } fn handle_file_upload(config: DropConfig, file: &Path) { if!file.exists() { println!("File does not exist! ({:?})", file); std::process::exit(1); } else { let filename = util::generate_filename(&config, file.file_name().map(|s| util::from_os_str(s)), None); let url = handle_upload_and_produce_url(&config, &file, Some(filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(filename, &config); } println!("{}", url); } } fn archive_directory(file: &Path) -> PathBuf { let archive_path = file.with_extension("tar.gz"); let archive_file = File::create(&archive_path).unwrap(); let enc = GzEncoder::new(archive_file, Compression::default()); let mut tar = tar::Builder::new(enc); let file_name = file.file_name().map(|s| util::from_os_str(s)).unwrap_or("archive".to_string()); tar.append_dir_all(file_name, file).unwrap(); tar.finish().unwrap(); archive_path } fn handle_stdin(config: DropConfig) { let mut buffer = Vec::new(); let result = io::stdin().read_to_end(&mut buffer); if result.is_err() { println!("ERROR: Caught error while reading input from stdin"); std::process::exit(1); } let out_filename = util::generate_filename(&config, None, None); let path = Path::new(&config.dir).join(out_filename.clone()); let mut file = File::create(&path).unwrap(); let write_result = file.write_all(&buffer); if write_result.is_err() { println!("ERROR: Caught error while writing to file"); std::process::exit(1) } let url = handle_upload_and_produce_url(&config, &path, Some(out_filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(out_filename.clone(), &config); } println!("{}", url); } fn handle_upload_and_produce_url(config: &DropConfig, file: &Path, filename: Option<String>) -> String { if config.local || config.aws_bucket.is_none() || config.aws_key.is_none() || config.aws_secret.is_none() { format!("file://{}", util::path_to_str(file.canonicalize().unwrap().as_path())) } else { aws::upload_file_to_s3(&config, &file, &filename); util::create_drop_url(&config, filename.unwrap_or(util::from_os_str(file.file_name().unwrap()))) } }
{ let archive = archive_directory(&path); handle_file_upload(config, &archive.as_path()) }
conditional_block
main.rs
extern crate config; extern crate rand; extern crate clap; extern crate nix; extern crate sys_info; extern crate anyhow; extern crate flate2; extern crate tar; #[macro_use] extern crate lazy_static; #[cfg(target_os = "linux")] extern crate gtk; #[cfg(target_os = "linux")] extern crate libappindicator; #[cfg(target_os = "linux")] extern crate libc; extern { } use std::path::{PathBuf, Path}; use clap::ArgMatches; use std::io::{self, Read, Write}; use std::fs::File; use flate2::Compression; use flate2::write::GzEncoder; mod aws; mod clip; mod conf; mod notify; mod capture; mod util; mod cli; mod ui; use conf::DropConfig; fn main() { let mut cli_app = cli::create_drop_cli_app(); let matches = cli_app.clone().get_matches(); let config = conf::load_config(&matches); if matches.is_present("file") { handle_file(config, &matches); } else if matches.is_present("screenshot") || matches.is_present("video") { handle_screen_capture(config, &matches); } else { let result = cli_app.print_help(); if result.is_err() { println!("WARNING: Error occurred attempting to print help text") } } } fn handle_screen_capture(config: DropConfig, matches: &ArgMatches) { let out_file = if matches.is_present("video") { capture_screencast(&config) } else { capture_screenshot(&config) }; let url = handle_upload_and_produce_url(&config, &out_file.as_path(), None); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_screenshot_notification(&out_file.as_path(), &config); } println!("{}", url); } fn capture_screenshot(config: &DropConfig) -> PathBuf
fn capture_screencast(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some(config.video_format.clone())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screencast(out_file.as_path(), config); out_file } fn handle_file(config: DropConfig, matches: &ArgMatches) { let file = matches.value_of("file").unwrap(); if file == "-" { handle_stdin(config); } else { let path = Path::new(file); if path.is_dir() { let archive = archive_directory(&path); handle_file_upload(config, &archive.as_path()) } else { handle_file_upload(config, &path); } } } fn handle_file_upload(config: DropConfig, file: &Path) { if!file.exists() { println!("File does not exist! ({:?})", file); std::process::exit(1); } else { let filename = util::generate_filename(&config, file.file_name().map(|s| util::from_os_str(s)), None); let url = handle_upload_and_produce_url(&config, &file, Some(filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(filename, &config); } println!("{}", url); } } fn archive_directory(file: &Path) -> PathBuf { let archive_path = file.with_extension("tar.gz"); let archive_file = File::create(&archive_path).unwrap(); let enc = GzEncoder::new(archive_file, Compression::default()); let mut tar = tar::Builder::new(enc); let file_name = file.file_name().map(|s| util::from_os_str(s)).unwrap_or("archive".to_string()); tar.append_dir_all(file_name, file).unwrap(); tar.finish().unwrap(); archive_path } fn handle_stdin(config: DropConfig) { let mut buffer = Vec::new(); let result = io::stdin().read_to_end(&mut buffer); if result.is_err() { println!("ERROR: Caught error while reading input from stdin"); std::process::exit(1); } let out_filename = util::generate_filename(&config, None, None); let path = Path::new(&config.dir).join(out_filename.clone()); let mut file = File::create(&path).unwrap(); let write_result = file.write_all(&buffer); if write_result.is_err() { println!("ERROR: Caught error while writing to file"); std::process::exit(1) } let url = handle_upload_and_produce_url(&config, &path, Some(out_filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(out_filename.clone(), &config); } println!("{}", url); } fn handle_upload_and_produce_url(config: &DropConfig, file: &Path, filename: Option<String>) -> String { if config.local || config.aws_bucket.is_none() || config.aws_key.is_none() || config.aws_secret.is_none() { format!("file://{}", util::path_to_str(file.canonicalize().unwrap().as_path())) } else { aws::upload_file_to_s3(&config, &file, &filename); util::create_drop_url(&config, filename.unwrap_or(util::from_os_str(file.file_name().unwrap()))) } }
{ let out_file_name = util::generate_filename(config, None, Some("png".to_string())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screenshot(out_file.as_path(), config); out_file }
identifier_body
main.rs
extern crate config; extern crate rand; extern crate clap; extern crate nix; extern crate sys_info; extern crate anyhow; extern crate flate2; extern crate tar; #[macro_use] extern crate lazy_static; #[cfg(target_os = "linux")] extern crate gtk; #[cfg(target_os = "linux")] extern crate libappindicator; #[cfg(target_os = "linux")] extern crate libc; extern { } use std::path::{PathBuf, Path}; use clap::ArgMatches; use std::io::{self, Read, Write}; use std::fs::File; use flate2::Compression; use flate2::write::GzEncoder; mod aws; mod clip; mod conf; mod notify; mod capture; mod util; mod cli; mod ui; use conf::DropConfig; fn main() { let mut cli_app = cli::create_drop_cli_app(); let matches = cli_app.clone().get_matches(); let config = conf::load_config(&matches); if matches.is_present("file") { handle_file(config, &matches); } else if matches.is_present("screenshot") || matches.is_present("video") { handle_screen_capture(config, &matches); } else { let result = cli_app.print_help(); if result.is_err() { println!("WARNING: Error occurred attempting to print help text") } } } fn handle_screen_capture(config: DropConfig, matches: &ArgMatches) { let out_file = if matches.is_present("video") { capture_screencast(&config) } else { capture_screenshot(&config) }; let url = handle_upload_and_produce_url(&config, &out_file.as_path(), None); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_screenshot_notification(&out_file.as_path(), &config); } println!("{}", url); } fn capture_screenshot(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some("png".to_string())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screenshot(out_file.as_path(), config); out_file } fn capture_screencast(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some(config.video_format.clone())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screencast(out_file.as_path(), config); out_file } fn handle_file(config: DropConfig, matches: &ArgMatches) { let file = matches.value_of("file").unwrap(); if file == "-" { handle_stdin(config); } else { let path = Path::new(file); if path.is_dir() { let archive = archive_directory(&path); handle_file_upload(config, &archive.as_path()) } else { handle_file_upload(config, &path); } } } fn handle_file_upload(config: DropConfig, file: &Path) { if!file.exists() { println!("File does not exist! 
({:?})", file); std::process::exit(1); } else { let filename = util::generate_filename(&config, file.file_name().map(|s| util::from_os_str(s)), None); let url = handle_upload_and_produce_url(&config, &file, Some(filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(filename, &config); } println!("{}", url); } } fn archive_directory(file: &Path) -> PathBuf { let archive_path = file.with_extension("tar.gz"); let archive_file = File::create(&archive_path).unwrap(); let enc = GzEncoder::new(archive_file, Compression::default()); let mut tar = tar::Builder::new(enc); let file_name = file.file_name().map(|s| util::from_os_str(s)).unwrap_or("archive".to_string()); tar.append_dir_all(file_name, file).unwrap(); tar.finish().unwrap(); archive_path } fn handle_stdin(config: DropConfig) { let mut buffer = Vec::new(); let result = io::stdin().read_to_end(&mut buffer); if result.is_err() { println!("ERROR: Caught error while reading input from stdin"); std::process::exit(1); } let out_filename = util::generate_filename(&config, None, None); let path = Path::new(&config.dir).join(out_filename.clone()); let mut file = File::create(&path).unwrap(); let write_result = file.write_all(&buffer); if write_result.is_err() { println!("ERROR: Caught error while writing to file"); std::process::exit(1) } let url = handle_upload_and_produce_url(&config, &path, Some(out_filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(out_filename.clone(), &config); } println!("{}", url); } fn
(config: &DropConfig, file: &Path, filename: Option<String>) -> String { if config.local || config.aws_bucket.is_none() || config.aws_key.is_none() || config.aws_secret.is_none() { format!("file://{}", util::path_to_str(file.canonicalize().unwrap().as_path())) } else { aws::upload_file_to_s3(&config, &file, &filename); util::create_drop_url(&config, filename.unwrap_or(util::from_os_str(file.file_name().unwrap()))) } }
handle_upload_and_produce_url
identifier_name
main.rs
extern crate config; extern crate rand; extern crate clap; extern crate nix; extern crate sys_info; extern crate anyhow; extern crate flate2; extern crate tar; #[macro_use] extern crate lazy_static; #[cfg(target_os = "linux")] extern crate gtk; #[cfg(target_os = "linux")] extern crate libappindicator; #[cfg(target_os = "linux")] extern crate libc; extern { } use std::path::{PathBuf, Path}; use clap::ArgMatches; use std::io::{self, Read, Write}; use std::fs::File; use flate2::Compression; use flate2::write::GzEncoder; mod aws; mod clip; mod conf; mod notify; mod capture; mod util; mod cli; mod ui; use conf::DropConfig; fn main() { let mut cli_app = cli::create_drop_cli_app(); let matches = cli_app.clone().get_matches(); let config = conf::load_config(&matches); if matches.is_present("file") { handle_file(config, &matches); } else if matches.is_present("screenshot") || matches.is_present("video") { handle_screen_capture(config, &matches); } else { let result = cli_app.print_help(); if result.is_err() { println!("WARNING: Error occurred attempting to print help text") } } } fn handle_screen_capture(config: DropConfig, matches: &ArgMatches) { let out_file = if matches.is_present("video") { capture_screencast(&config) } else { capture_screenshot(&config) }; let url = handle_upload_and_produce_url(&config, &out_file.as_path(), None); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_screenshot_notification(&out_file.as_path(), &config); } println!("{}", url); } fn capture_screenshot(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some("png".to_string())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screenshot(out_file.as_path(), config); out_file } fn capture_screencast(config: &DropConfig) -> PathBuf { let out_file_name = util::generate_filename(config, None, Some(config.video_format.clone())); let out_file = Path::new(&config.dir).join(out_file_name); capture::screencast(out_file.as_path(), config); out_file } fn handle_file(config: DropConfig, matches: &ArgMatches) { let file = matches.value_of("file").unwrap(); if file == "-" { handle_stdin(config); } else { let path = Path::new(file); if path.is_dir() { let archive = archive_directory(&path); handle_file_upload(config, &archive.as_path()) } else { handle_file_upload(config, &path); } } } fn handle_file_upload(config: DropConfig, file: &Path) { if!file.exists() { println!("File does not exist! ({:?})", file); std::process::exit(1);
let url = handle_upload_and_produce_url(&config, &file, Some(filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(filename, &config); } println!("{}", url); } } fn archive_directory(file: &Path) -> PathBuf { let archive_path = file.with_extension("tar.gz"); let archive_file = File::create(&archive_path).unwrap(); let enc = GzEncoder::new(archive_file, Compression::default()); let mut tar = tar::Builder::new(enc); let file_name = file.file_name().map(|s| util::from_os_str(s)).unwrap_or("archive".to_string()); tar.append_dir_all(file_name, file).unwrap(); tar.finish().unwrap(); archive_path } fn handle_stdin(config: DropConfig) { let mut buffer = Vec::new(); let result = io::stdin().read_to_end(&mut buffer); if result.is_err() { println!("ERROR: Caught error while reading input from stdin"); std::process::exit(1); } let out_filename = util::generate_filename(&config, None, None); let path = Path::new(&config.dir).join(out_filename.clone()); let mut file = File::create(&path).unwrap(); let write_result = file.write_all(&buffer); if write_result.is_err() { println!("ERROR: Caught error while writing to file"); std::process::exit(1) } let url = handle_upload_and_produce_url(&config, &path, Some(out_filename.clone())); clip::copy_to_clipboard(url.clone()); if config.notifications { notify::send_upload_notification(out_filename.clone(), &config); } println!("{}", url); } fn handle_upload_and_produce_url(config: &DropConfig, file: &Path, filename: Option<String>) -> String { if config.local || config.aws_bucket.is_none() || config.aws_key.is_none() || config.aws_secret.is_none() { format!("file://{}", util::path_to_str(file.canonicalize().unwrap().as_path())) } else { aws::upload_file_to_s3(&config, &file, &filename); util::create_drop_url(&config, filename.unwrap_or(util::from_os_str(file.file_name().unwrap()))) } }
} else { let filename = util::generate_filename(&config, file.file_name().map(|s| util::from_os_str(s)), None);
random_line_split
mod.rs
pub mod g2d; pub mod g3d; use time; pub trait Drawable { fn draw(&self); } pub struct
{ pub width: u32, pub height: u32, pub refresh_rate: u16, pub bits_per_pixel: u16 } pub struct Monitor { pub virtual_y: u32, pub virtual_x: u32, pub name: String, pub display_modes: Vec<DisplayMode> } pub struct Graphics { pub display_mode: DisplayMode, pub frame_id: u32, pub delta_time: time::Duration, pub fps: u16, pub monitors: Vec<Monitor>, pub should_close: bool } impl Graphics { pub fn new(width: u32, height: u32, title: &str) -> Self { Graphics { display_mode: DisplayMode {width: width, height: height, refresh_rate: 1, bits_per_pixel: 1}, frame_id: 0, delta_time: time::Duration::seconds(1), fps: 0, monitors: vec![], should_close: false } } } #[derive(Debug)] pub struct Geometry { mesh: Mesh } #[derive(Debug)] pub struct Mesh { // verticies:, // edges:, // polygons: } // enum VertexDataType { // VertexArray, VertexBufferObject, VertexBufferObjectSubData, VertexBufferObjectWithVAO // }
DisplayMode
identifier_name
mod.rs
pub mod g2d; pub mod g3d; use time; pub trait Drawable { fn draw(&self); } pub struct DisplayMode { pub width: u32, pub height: u32, pub refresh_rate: u16, pub bits_per_pixel: u16 } pub struct Monitor { pub virtual_y: u32, pub virtual_x: u32,
} pub struct Graphics { pub display_mode: DisplayMode, pub frame_id: u32, pub delta_time: time::Duration, pub fps: u16, pub monitors: Vec<Monitor>, pub should_close: bool } impl Graphics { pub fn new(width: u32, height: u32, title: &str) -> Self { Graphics { display_mode: DisplayMode {width: width, height: height, refresh_rate: 1, bits_per_pixel: 1}, frame_id: 0, delta_time: time::Duration::seconds(1), fps: 0, monitors: vec![], should_close: false } } } #[derive(Debug)] pub struct Geometry { mesh: Mesh } #[derive(Debug)] pub struct Mesh { // verticies:, // edges:, // polygons: } // enum VertexDataType { // VertexArray, VertexBufferObject, VertexBufferObjectSubData, VertexBufferObjectWithVAO // }
pub name: String, pub display_modes: Vec<DisplayMode>
random_line_split
enum-null-pointer-opt.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(nonzero, core)] extern crate core; use core::nonzero::NonZero; use std::mem::size_of; use std::rc::Rc; use std::sync::Arc; trait Trait { fn dummy(&self) { } } fn main() { // Functions assert_eq!(size_of::<fn(isize)>(), size_of::<Option<fn(isize)>>()); assert_eq!(size_of::<extern "C" fn(isize)>(), size_of::<Option<extern "C" fn(isize)>>()); // Slices - &str / &[T] / &mut [T] assert_eq!(size_of::<&str>(), size_of::<Option<&str>>()); assert_eq!(size_of::<&[isize]>(), size_of::<Option<&[isize]>>()); assert_eq!(size_of::<&mut [isize]>(), size_of::<Option<&mut [isize]>>()); // Traits - Box<Trait> / &Trait / &mut Trait assert_eq!(size_of::<Box<Trait>>(), size_of::<Option<Box<Trait>>>()); assert_eq!(size_of::<&Trait>(), size_of::<Option<&Trait>>()); assert_eq!(size_of::<&mut Trait>(), size_of::<Option<&mut Trait>>()); // Pointers - Box<T> assert_eq!(size_of::<Box<isize>>(), size_of::<Option<Box<isize>>>()); // The optimization can't apply to raw pointers assert!(size_of::<Option<*const isize>>()!= size_of::<*const isize>()); assert!(Some(0 as *const isize).is_some()); // Can't collapse None to null struct Foo { _a: Box<isize> }
assert_eq!(size_of::<Foo>(), size_of::<Option<Foo>>()); assert_eq!(size_of::<Bar>(), size_of::<Option<Bar>>()); // and tuples assert_eq!(size_of::<(u8, Box<isize>)>(), size_of::<Option<(u8, Box<isize>)>>()); // and fixed-size arrays assert_eq!(size_of::<[Box<isize>; 1]>(), size_of::<Option<[Box<isize>; 1]>>()); // Should apply to NonZero assert_eq!(size_of::<NonZero<usize>>(), size_of::<Option<NonZero<usize>>>()); assert_eq!(size_of::<NonZero<*mut i8>>(), size_of::<Option<NonZero<*mut i8>>>()); // Should apply to types that use NonZero internally assert_eq!(size_of::<Vec<isize>>(), size_of::<Option<Vec<isize>>>()); assert_eq!(size_of::<Arc<isize>>(), size_of::<Option<Arc<isize>>>()); assert_eq!(size_of::<Rc<isize>>(), size_of::<Option<Rc<isize>>>()); // Should apply to types that have NonZero transitively assert_eq!(size_of::<String>(), size_of::<Option<String>>()); }
struct Bar(Box<isize>); // Should apply through structs
random_line_split
enum-null-pointer-opt.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(nonzero, core)] extern crate core; use core::nonzero::NonZero; use std::mem::size_of; use std::rc::Rc; use std::sync::Arc; trait Trait { fn dummy(&self) { } } fn main() { // Functions assert_eq!(size_of::<fn(isize)>(), size_of::<Option<fn(isize)>>()); assert_eq!(size_of::<extern "C" fn(isize)>(), size_of::<Option<extern "C" fn(isize)>>()); // Slices - &str / &[T] / &mut [T] assert_eq!(size_of::<&str>(), size_of::<Option<&str>>()); assert_eq!(size_of::<&[isize]>(), size_of::<Option<&[isize]>>()); assert_eq!(size_of::<&mut [isize]>(), size_of::<Option<&mut [isize]>>()); // Traits - Box<Trait> / &Trait / &mut Trait assert_eq!(size_of::<Box<Trait>>(), size_of::<Option<Box<Trait>>>()); assert_eq!(size_of::<&Trait>(), size_of::<Option<&Trait>>()); assert_eq!(size_of::<&mut Trait>(), size_of::<Option<&mut Trait>>()); // Pointers - Box<T> assert_eq!(size_of::<Box<isize>>(), size_of::<Option<Box<isize>>>()); // The optimization can't apply to raw pointers assert!(size_of::<Option<*const isize>>()!= size_of::<*const isize>()); assert!(Some(0 as *const isize).is_some()); // Can't collapse None to null struct
{ _a: Box<isize> } struct Bar(Box<isize>); // Should apply through structs assert_eq!(size_of::<Foo>(), size_of::<Option<Foo>>()); assert_eq!(size_of::<Bar>(), size_of::<Option<Bar>>()); // and tuples assert_eq!(size_of::<(u8, Box<isize>)>(), size_of::<Option<(u8, Box<isize>)>>()); // and fixed-size arrays assert_eq!(size_of::<[Box<isize>; 1]>(), size_of::<Option<[Box<isize>; 1]>>()); // Should apply to NonZero assert_eq!(size_of::<NonZero<usize>>(), size_of::<Option<NonZero<usize>>>()); assert_eq!(size_of::<NonZero<*mut i8>>(), size_of::<Option<NonZero<*mut i8>>>()); // Should apply to types that use NonZero internally assert_eq!(size_of::<Vec<isize>>(), size_of::<Option<Vec<isize>>>()); assert_eq!(size_of::<Arc<isize>>(), size_of::<Option<Arc<isize>>>()); assert_eq!(size_of::<Rc<isize>>(), size_of::<Option<Rc<isize>>>()); // Should apply to types that have NonZero transitively assert_eq!(size_of::<String>(), size_of::<Option<String>>()); }
Foo
identifier_name
optimizer.rs
use std::collections::BTreeMap; use crate::structs::Op::*; use crate::structs::{Op, OpStream}; impl OpStream { pub fn optimize(&mut self) { let mut i = 0; while i < self.ops.len() { match self.ops[i..] { [Add(a), Add(b),..] => { self.ops[i] = Add(a.wrapping_add(b)); self.ops.remove(i + 1); } [Mov(a), Mov(b),..] => { self.ops[i] = Mov(a + b); self.ops.remove(i + 1); } [Add(0),..] | [Mov(0),..] => { self.ops.remove(i); if i > 0 { i -= 1; } } [Loop(ref mut stream),..] => { stream.optimize(); if let Some(new_op) = stream.find_alternative() { self.ops[i] = new_op; } i += 1 } _ => i += 1, } } } fn find_alternative(&self) -> Option<Op>
map.remove(&0).unwrap_or(0), map.into_iter().collect(), )) } else { None } } } #[cfg(test)] mod tests { use crate::structs::Op::*; use crate::structs::OpStream; #[test] fn test_opstream_optimize() { let mut opstream = OpStream { ops: vec![ Mov(1), Mov(1), Add(0x01), Add(0xff), Add(0xff), Mov(1), Mov(-1), Loop(OpStream { ops: vec![Mov(2), Mov(3)], }), ], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Mov(2), Add(0xff), Loop(OpStream { ops: vec![Mov(5)] })] } ); } #[test] fn test_opstream_optimize_transfer() { let mut opstream = OpStream { ops: vec![Loop(OpStream { ops: vec![Add(0x01), Mov(3), Add(0xff), Mov(-3)], })], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Transfer(1, vec![(3, 255)])] } ); } }
{ let mut map = BTreeMap::<isize, u8>::new(); let mut rel_index = 0; for op in &self.ops { match *op { Add(x) => { map.insert(rel_index, map.get(&rel_index).unwrap_or(&0).wrapping_add(x)); } Mov(x) => { rel_index += x; } _ => { return None; } } } if rel_index == 0 { Some(Transfer(
identifier_body
optimizer.rs
use std::collections::BTreeMap; use crate::structs::Op::*; use crate::structs::{Op, OpStream}; impl OpStream { pub fn optimize(&mut self) { let mut i = 0; while i < self.ops.len() { match self.ops[i..] { [Add(a), Add(b),..] => { self.ops[i] = Add(a.wrapping_add(b)); self.ops.remove(i + 1); } [Mov(a), Mov(b),..] => { self.ops[i] = Mov(a + b); self.ops.remove(i + 1); } [Add(0),..] | [Mov(0),..] => { self.ops.remove(i); if i > 0 { i -= 1; } } [Loop(ref mut stream),..] => { stream.optimize(); if let Some(new_op) = stream.find_alternative() { self.ops[i] = new_op; } i += 1 } _ => i += 1, } } } fn find_alternative(&self) -> Option<Op> { let mut map = BTreeMap::<isize, u8>::new(); let mut rel_index = 0; for op in &self.ops { match *op { Add(x) => { map.insert(rel_index, map.get(&rel_index).unwrap_or(&0).wrapping_add(x)); } Mov(x) => { rel_index += x; } _ => { return None; } } } if rel_index == 0 { Some(Transfer( map.remove(&0).unwrap_or(0), map.into_iter().collect(), )) } else { None } } } #[cfg(test)] mod tests { use crate::structs::Op::*; use crate::structs::OpStream; #[test] fn test_opstream_optimize() { let mut opstream = OpStream { ops: vec![ Mov(1), Mov(1), Add(0x01), Add(0xff), Add(0xff), Mov(1), Mov(-1), Loop(OpStream { ops: vec![Mov(2), Mov(3)], }), ], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Mov(2), Add(0xff), Loop(OpStream { ops: vec![Mov(5)] })] } ); } #[test] fn
() { let mut opstream = OpStream { ops: vec![Loop(OpStream { ops: vec![Add(0x01), Mov(3), Add(0xff), Mov(-3)], })], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Transfer(1, vec![(3, 255)])] } ); } }
test_opstream_optimize_transfer
identifier_name
optimizer.rs
use std::collections::BTreeMap; use crate::structs::Op::*; use crate::structs::{Op, OpStream}; impl OpStream { pub fn optimize(&mut self) { let mut i = 0; while i < self.ops.len() { match self.ops[i..] { [Add(a), Add(b),..] => { self.ops[i] = Add(a.wrapping_add(b)); self.ops.remove(i + 1); } [Mov(a), Mov(b),..] => { self.ops[i] = Mov(a + b); self.ops.remove(i + 1); } [Add(0),..] | [Mov(0),..] => { self.ops.remove(i); if i > 0 { i -= 1; } } [Loop(ref mut stream),..] => { stream.optimize(); if let Some(new_op) = stream.find_alternative() { self.ops[i] = new_op; } i += 1 } _ => i += 1, } } } fn find_alternative(&self) -> Option<Op> { let mut map = BTreeMap::<isize, u8>::new(); let mut rel_index = 0; for op in &self.ops { match *op { Add(x) => { map.insert(rel_index, map.get(&rel_index).unwrap_or(&0).wrapping_add(x)); } Mov(x) => { rel_index += x; } _ => { return None; } } } if rel_index == 0 { Some(Transfer( map.remove(&0).unwrap_or(0), map.into_iter().collect(), )) } else { None } } }
#[test] fn test_opstream_optimize() { let mut opstream = OpStream { ops: vec![ Mov(1), Mov(1), Add(0x01), Add(0xff), Add(0xff), Mov(1), Mov(-1), Loop(OpStream { ops: vec![Mov(2), Mov(3)], }), ], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Mov(2), Add(0xff), Loop(OpStream { ops: vec![Mov(5)] })] } ); } #[test] fn test_opstream_optimize_transfer() { let mut opstream = OpStream { ops: vec![Loop(OpStream { ops: vec![Add(0x01), Mov(3), Add(0xff), Mov(-3)], })], }; opstream.optimize(); assert_eq!( opstream, OpStream { ops: vec![Transfer(1, vec![(3, 255)])] } ); } }
#[cfg(test)] mod tests { use crate::structs::Op::*; use crate::structs::OpStream;
random_line_split
http_server.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use foxbox_taxonomy::manager::*; use hyper::net::{ NetworkListener }; use iron::{ AfterMiddleware, Chain, Handler, HttpServerFactory, Iron, IronResult, Request, Response, ServerFactory }; use iron_cors::CORS; use iron::error::{ IronError }; use iron::method::Method; use iron::status::Status; use mount::Mount; use router::NoRoute; use static_router; use std::net::SocketAddr; use std::sync::Arc; use std::thread; use taxonomy_router; use tls::SniServerFactory; use traits::Controller; const THREAD_COUNT: usize = 8; struct Custom404; impl AfterMiddleware for Custom404 { fn catch(&self, _: &mut Request, err: IronError) -> IronResult<Response> { use std::io::Error as StdError; use std::io::ErrorKind; if let Some(_) = err.error.downcast::<NoRoute>() { // Router error return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } else if let Some(err) = err.error.downcast::<StdError>() { // StaticFile error if err.kind() == ErrorKind::NotFound { return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } } // Just let other errors go through, like 401. Err(err) } } struct Ping; impl Handler for Ping { fn handle (&self, _: &mut Request) -> IronResult<Response> { Ok(Response::with(Status::NoContent)) } } pub struct HttpServer<T: Controller> { controller: T } impl<T: Controller> HttpServer<T> { pub fn new(controller: T) -> Self { HttpServer { controller: controller } } pub fn start(&mut self, adapter_api: &Arc<AdapterManager>) { let taxonomy_chain = taxonomy_router::create(self.controller.clone(), adapter_api); let users_manager = self.controller.get_users_manager(); let mut mount = Mount::new(); mount.mount("/", static_router::create(users_manager.clone())) .mount("/ping", Ping) .mount("/api/v1", taxonomy_chain) .mount("/users", users_manager.get_router_chain()); let mut chain = Chain::new(mount); chain.link_after(Custom404); let cors = CORS::new(vec![ (vec![Method::Get], "ping".to_owned()), (vec![Method::Get, Method::Post, Method::Put, Method::Delete], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()), // Taxonomy router paths. Keep in sync with taxonomy_router.rs (vec![Method::Get, Method::Post], "api/v1/services".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/services/tags".to_owned()), (vec![Method::Get, Method::Post], "api/v1/channels".to_owned()), (vec![Method::Put], "api/v1/channels/get".to_owned()), (vec![Method::Put], "api/v1/channels/set".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/channels/tags".to_owned()) ]); chain.link_after(cors); let addrs: Vec<_> = self.controller.http_as_addrs().unwrap().collect(); if self.controller.get_tls_enabled() { let mut certificate_manager = self.controller.get_certificate_manager(); let server_factory = SniServerFactory::new(&mut certificate_manager); start_server(addrs, chain, server_factory); } else { start_server(addrs, chain, HttpServerFactory {}); } } } fn start_server<TListener, T>(addrs: Vec<SocketAddr>, chain: Chain, factory: T) where TListener: NetworkListener + Send +'static, T: ServerFactory<TListener> + Send +'static { thread::Builder::new().name("HttpServer".to_owned()) .spawn(move || { Iron::new(chain) .listen_with(addrs[0], THREAD_COUNT, &factory, None) .unwrap(); }).unwrap(); } #[cfg(test)] describe! 
ping { before_each { use mount::Mount; use iron::Headers; use iron::status::Status; use iron_test::request; use super::Ping; let mut mount = Mount::new(); mount.mount("/ping", Ping); } it "should response 204 NoContent" { let response = request::get("http://localhost:3000/ping", Headers::new(), &mount).unwrap(); assert_eq!(response.status.unwrap(), Status::NoContent); } } #[cfg(test)] describe! http_server { before_each { extern crate hyper; use foxbox_taxonomy::manager::AdapterManager; use std::thread; use std::sync::Arc; use std::time::Duration; use stubs::controller::ControllerStub; let taxo_manager = Arc::new(AdapterManager::new(None)); let mut http_server = HttpServer::new(ControllerStub::new()); http_server.start(&taxo_manager); // HACK: Let some time for the http server to start. thread::sleep(Duration::new(3, 0)); } it "should get the appropriate CORS headers" { use iron::headers; use iron::method::Method; let endpoints = vec![ (vec![Method::Get, Method::Post, Method::Put], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()) ]; let client = hyper::Client::new(); for endpoint in endpoints { let (_, path) = endpoint; let path = "http://localhost:3000/".to_owned() + &(path.replace(":", "foo")); let res = client.get(&path).send(); let headers = &res.unwrap().headers; assert!(headers.has::<headers::AccessControlAllowOrigin>()); assert!(headers.has::<headers::AccessControlAllowHeaders>()); assert!(headers.has::<headers::AccessControlAllowMethods>()); }; } it "should respond with 404" { use iron::status::Status; use std::io::Read; let client = hyper::Client::new(); let path = "http://localhost:3000/foo/bar".to_owned(); let mut res = client.get(&path).send().unwrap(); assert_eq!(res.status, Status::NotFound); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); assert_eq!(body, "Unknown resource: No such file or \ directory (os error 2)".to_owned()); } }
random_line_split
http_server.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use foxbox_taxonomy::manager::*; use hyper::net::{ NetworkListener }; use iron::{ AfterMiddleware, Chain, Handler, HttpServerFactory, Iron, IronResult, Request, Response, ServerFactory }; use iron_cors::CORS; use iron::error::{ IronError }; use iron::method::Method; use iron::status::Status; use mount::Mount; use router::NoRoute; use static_router; use std::net::SocketAddr; use std::sync::Arc; use std::thread; use taxonomy_router; use tls::SniServerFactory; use traits::Controller; const THREAD_COUNT: usize = 8; struct Custom404; impl AfterMiddleware for Custom404 { fn catch(&self, _: &mut Request, err: IronError) -> IronResult<Response> { use std::io::Error as StdError; use std::io::ErrorKind; if let Some(_) = err.error.downcast::<NoRoute>() { // Router error return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } else if let Some(err) = err.error.downcast::<StdError>() { // StaticFile error if err.kind() == ErrorKind::NotFound { return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } } // Just let other errors go through, like 401. Err(err) } } struct Ping; impl Handler for Ping { fn handle (&self, _: &mut Request) -> IronResult<Response> { Ok(Response::with(Status::NoContent)) } } pub struct HttpServer<T: Controller> { controller: T } impl<T: Controller> HttpServer<T> { pub fn new(controller: T) -> Self { HttpServer { controller: controller } } pub fn start(&mut self, adapter_api: &Arc<AdapterManager>)
// Taxonomy router paths. Keep in sync with taxonomy_router.rs (vec![Method::Get, Method::Post], "api/v1/services".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/services/tags".to_owned()), (vec![Method::Get, Method::Post], "api/v1/channels".to_owned()), (vec![Method::Put], "api/v1/channels/get".to_owned()), (vec![Method::Put], "api/v1/channels/set".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/channels/tags".to_owned()) ]); chain.link_after(cors); let addrs: Vec<_> = self.controller.http_as_addrs().unwrap().collect(); if self.controller.get_tls_enabled() { let mut certificate_manager = self.controller.get_certificate_manager(); let server_factory = SniServerFactory::new(&mut certificate_manager); start_server(addrs, chain, server_factory); } else { start_server(addrs, chain, HttpServerFactory {}); } } } fn start_server<TListener, T>(addrs: Vec<SocketAddr>, chain: Chain, factory: T) where TListener: NetworkListener + Send +'static, T: ServerFactory<TListener> + Send +'static { thread::Builder::new().name("HttpServer".to_owned()) .spawn(move || { Iron::new(chain) .listen_with(addrs[0], THREAD_COUNT, &factory, None) .unwrap(); }).unwrap(); } #[cfg(test)] describe! ping { before_each { use mount::Mount; use iron::Headers; use iron::status::Status; use iron_test::request; use super::Ping; let mut mount = Mount::new(); mount.mount("/ping", Ping); } it "should response 204 NoContent" { let response = request::get("http://localhost:3000/ping", Headers::new(), &mount).unwrap(); assert_eq!(response.status.unwrap(), Status::NoContent); } } #[cfg(test)] describe! http_server { before_each { extern crate hyper; use foxbox_taxonomy::manager::AdapterManager; use std::thread; use std::sync::Arc; use std::time::Duration; use stubs::controller::ControllerStub; let taxo_manager = Arc::new(AdapterManager::new(None)); let mut http_server = HttpServer::new(ControllerStub::new()); http_server.start(&taxo_manager); // HACK: Let some time for the http server to start. thread::sleep(Duration::new(3, 0)); } it "should get the appropriate CORS headers" { use iron::headers; use iron::method::Method; let endpoints = vec![ (vec![Method::Get, Method::Post, Method::Put], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()) ]; let client = hyper::Client::new(); for endpoint in endpoints { let (_, path) = endpoint; let path = "http://localhost:3000/".to_owned() + &(path.replace(":", "foo")); let res = client.get(&path).send(); let headers = &res.unwrap().headers; assert!(headers.has::<headers::AccessControlAllowOrigin>()); assert!(headers.has::<headers::AccessControlAllowHeaders>()); assert!(headers.has::<headers::AccessControlAllowMethods>()); }; } it "should respond with 404" { use iron::status::Status; use std::io::Read; let client = hyper::Client::new(); let path = "http://localhost:3000/foo/bar".to_owned(); let mut res = client.get(&path).send().unwrap(); assert_eq!(res.status, Status::NotFound); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); assert_eq!(body, "Unknown resource: No such file or \ directory (os error 2)".to_owned()); } }
{ let taxonomy_chain = taxonomy_router::create(self.controller.clone(), adapter_api); let users_manager = self.controller.get_users_manager(); let mut mount = Mount::new(); mount.mount("/", static_router::create(users_manager.clone())) .mount("/ping", Ping) .mount("/api/v1", taxonomy_chain) .mount("/users", users_manager.get_router_chain()); let mut chain = Chain::new(mount); chain.link_after(Custom404); let cors = CORS::new(vec![ (vec![Method::Get], "ping".to_owned()), (vec![Method::Get, Method::Post, Method::Put, Method::Delete], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()),
identifier_body
http_server.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use foxbox_taxonomy::manager::*; use hyper::net::{ NetworkListener }; use iron::{ AfterMiddleware, Chain, Handler, HttpServerFactory, Iron, IronResult, Request, Response, ServerFactory }; use iron_cors::CORS; use iron::error::{ IronError }; use iron::method::Method; use iron::status::Status; use mount::Mount; use router::NoRoute; use static_router; use std::net::SocketAddr; use std::sync::Arc; use std::thread; use taxonomy_router; use tls::SniServerFactory; use traits::Controller; const THREAD_COUNT: usize = 8; struct Custom404; impl AfterMiddleware for Custom404 { fn catch(&self, _: &mut Request, err: IronError) -> IronResult<Response> { use std::io::Error as StdError; use std::io::ErrorKind; if let Some(_) = err.error.downcast::<NoRoute>() { // Router error return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } else if let Some(err) = err.error.downcast::<StdError>() { // StaticFile error if err.kind() == ErrorKind::NotFound { return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } } // Just let other errors go through, like 401. Err(err) } } struct Ping; impl Handler for Ping { fn handle (&self, _: &mut Request) -> IronResult<Response> { Ok(Response::with(Status::NoContent)) } } pub struct HttpServer<T: Controller> { controller: T } impl<T: Controller> HttpServer<T> { pub fn new(controller: T) -> Self { HttpServer { controller: controller } } pub fn start(&mut self, adapter_api: &Arc<AdapterManager>) { let taxonomy_chain = taxonomy_router::create(self.controller.clone(), adapter_api); let users_manager = self.controller.get_users_manager(); let mut mount = Mount::new(); mount.mount("/", static_router::create(users_manager.clone())) .mount("/ping", Ping) .mount("/api/v1", taxonomy_chain) .mount("/users", users_manager.get_router_chain()); let mut chain = Chain::new(mount); chain.link_after(Custom404); let cors = CORS::new(vec![ (vec![Method::Get], "ping".to_owned()), (vec![Method::Get, Method::Post, Method::Put, Method::Delete], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()), // Taxonomy router paths. Keep in sync with taxonomy_router.rs (vec![Method::Get, Method::Post], "api/v1/services".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/services/tags".to_owned()), (vec![Method::Get, Method::Post], "api/v1/channels".to_owned()), (vec![Method::Put], "api/v1/channels/get".to_owned()), (vec![Method::Put], "api/v1/channels/set".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/channels/tags".to_owned()) ]); chain.link_after(cors); let addrs: Vec<_> = self.controller.http_as_addrs().unwrap().collect(); if self.controller.get_tls_enabled() { let mut certificate_manager = self.controller.get_certificate_manager(); let server_factory = SniServerFactory::new(&mut certificate_manager); start_server(addrs, chain, server_factory); } else
} } fn start_server<TListener, T>(addrs: Vec<SocketAddr>, chain: Chain, factory: T) where TListener: NetworkListener + Send +'static, T: ServerFactory<TListener> + Send +'static { thread::Builder::new().name("HttpServer".to_owned()) .spawn(move || { Iron::new(chain) .listen_with(addrs[0], THREAD_COUNT, &factory, None) .unwrap(); }).unwrap(); } #[cfg(test)] describe! ping { before_each { use mount::Mount; use iron::Headers; use iron::status::Status; use iron_test::request; use super::Ping; let mut mount = Mount::new(); mount.mount("/ping", Ping); } it "should response 204 NoContent" { let response = request::get("http://localhost:3000/ping", Headers::new(), &mount).unwrap(); assert_eq!(response.status.unwrap(), Status::NoContent); } } #[cfg(test)] describe! http_server { before_each { extern crate hyper; use foxbox_taxonomy::manager::AdapterManager; use std::thread; use std::sync::Arc; use std::time::Duration; use stubs::controller::ControllerStub; let taxo_manager = Arc::new(AdapterManager::new(None)); let mut http_server = HttpServer::new(ControllerStub::new()); http_server.start(&taxo_manager); // HACK: Let some time for the http server to start. thread::sleep(Duration::new(3, 0)); } it "should get the appropriate CORS headers" { use iron::headers; use iron::method::Method; let endpoints = vec![ (vec![Method::Get, Method::Post, Method::Put], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()) ]; let client = hyper::Client::new(); for endpoint in endpoints { let (_, path) = endpoint; let path = "http://localhost:3000/".to_owned() + &(path.replace(":", "foo")); let res = client.get(&path).send(); let headers = &res.unwrap().headers; assert!(headers.has::<headers::AccessControlAllowOrigin>()); assert!(headers.has::<headers::AccessControlAllowHeaders>()); assert!(headers.has::<headers::AccessControlAllowMethods>()); }; } it "should respond with 404" { use iron::status::Status; use std::io::Read; let client = hyper::Client::new(); let path = "http://localhost:3000/foo/bar".to_owned(); let mut res = client.get(&path).send().unwrap(); assert_eq!(res.status, Status::NotFound); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); assert_eq!(body, "Unknown resource: No such file or \ directory (os error 2)".to_owned()); } }
{ start_server(addrs, chain, HttpServerFactory {}); }
conditional_block
http_server.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use foxbox_taxonomy::manager::*; use hyper::net::{ NetworkListener }; use iron::{ AfterMiddleware, Chain, Handler, HttpServerFactory, Iron, IronResult, Request, Response, ServerFactory }; use iron_cors::CORS; use iron::error::{ IronError }; use iron::method::Method; use iron::status::Status; use mount::Mount; use router::NoRoute; use static_router; use std::net::SocketAddr; use std::sync::Arc; use std::thread; use taxonomy_router; use tls::SniServerFactory; use traits::Controller; const THREAD_COUNT: usize = 8; struct Custom404; impl AfterMiddleware for Custom404 { fn catch(&self, _: &mut Request, err: IronError) -> IronResult<Response> { use std::io::Error as StdError; use std::io::ErrorKind; if let Some(_) = err.error.downcast::<NoRoute>() { // Router error return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } else if let Some(err) = err.error.downcast::<StdError>() { // StaticFile error if err.kind() == ErrorKind::NotFound { return Ok(Response::with((Status::NotFound, format!("Unknown resource: {}", err)))); } } // Just let other errors go through, like 401. Err(err) } } struct Ping; impl Handler for Ping { fn handle (&self, _: &mut Request) -> IronResult<Response> { Ok(Response::with(Status::NoContent)) } } pub struct HttpServer<T: Controller> { controller: T } impl<T: Controller> HttpServer<T> { pub fn new(controller: T) -> Self { HttpServer { controller: controller } } pub fn
(&mut self, adapter_api: &Arc<AdapterManager>) { let taxonomy_chain = taxonomy_router::create(self.controller.clone(), adapter_api); let users_manager = self.controller.get_users_manager(); let mut mount = Mount::new(); mount.mount("/", static_router::create(users_manager.clone())) .mount("/ping", Ping) .mount("/api/v1", taxonomy_chain) .mount("/users", users_manager.get_router_chain()); let mut chain = Chain::new(mount); chain.link_after(Custom404); let cors = CORS::new(vec![ (vec![Method::Get], "ping".to_owned()), (vec![Method::Get, Method::Post, Method::Put, Method::Delete], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()), // Taxonomy router paths. Keep in sync with taxonomy_router.rs (vec![Method::Get, Method::Post], "api/v1/services".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/services/tags".to_owned()), (vec![Method::Get, Method::Post], "api/v1/channels".to_owned()), (vec![Method::Put], "api/v1/channels/get".to_owned()), (vec![Method::Put], "api/v1/channels/set".to_owned()), (vec![Method::Post, Method::Delete], "api/v1/channels/tags".to_owned()) ]); chain.link_after(cors); let addrs: Vec<_> = self.controller.http_as_addrs().unwrap().collect(); if self.controller.get_tls_enabled() { let mut certificate_manager = self.controller.get_certificate_manager(); let server_factory = SniServerFactory::new(&mut certificate_manager); start_server(addrs, chain, server_factory); } else { start_server(addrs, chain, HttpServerFactory {}); } } } fn start_server<TListener, T>(addrs: Vec<SocketAddr>, chain: Chain, factory: T) where TListener: NetworkListener + Send +'static, T: ServerFactory<TListener> + Send +'static { thread::Builder::new().name("HttpServer".to_owned()) .spawn(move || { Iron::new(chain) .listen_with(addrs[0], THREAD_COUNT, &factory, None) .unwrap(); }).unwrap(); } #[cfg(test)] describe! ping { before_each { use mount::Mount; use iron::Headers; use iron::status::Status; use iron_test::request; use super::Ping; let mut mount = Mount::new(); mount.mount("/ping", Ping); } it "should response 204 NoContent" { let response = request::get("http://localhost:3000/ping", Headers::new(), &mount).unwrap(); assert_eq!(response.status.unwrap(), Status::NoContent); } } #[cfg(test)] describe! http_server { before_each { extern crate hyper; use foxbox_taxonomy::manager::AdapterManager; use std::thread; use std::sync::Arc; use std::time::Duration; use stubs::controller::ControllerStub; let taxo_manager = Arc::new(AdapterManager::new(None)); let mut http_server = HttpServer::new(ControllerStub::new()); http_server.start(&taxo_manager); // HACK: Let some time for the http server to start. 
thread::sleep(Duration::new(3, 0)); } it "should get the appropriate CORS headers" { use iron::headers; use iron::method::Method; let endpoints = vec![ (vec![Method::Get, Method::Post, Method::Put], "services/:service/:command".to_owned()), (vec![Method::Get], "services/list".to_owned()) ]; let client = hyper::Client::new(); for endpoint in endpoints { let (_, path) = endpoint; let path = "http://localhost:3000/".to_owned() + &(path.replace(":", "foo")); let res = client.get(&path).send(); let headers = &res.unwrap().headers; assert!(headers.has::<headers::AccessControlAllowOrigin>()); assert!(headers.has::<headers::AccessControlAllowHeaders>()); assert!(headers.has::<headers::AccessControlAllowMethods>()); }; } it "should respond with 404" { use iron::status::Status; use std::io::Read; let client = hyper::Client::new(); let path = "http://localhost:3000/foo/bar".to_owned(); let mut res = client.get(&path).send().unwrap(); assert_eq!(res.status, Status::NotFound); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); assert_eq!(body, "Unknown resource: No such file or \ directory (os error 2)".to_owned()); } }
start
identifier_name
raii_mutex_table.rs
use parking_lot::Mutex; use std::cmp::Eq; use std::collections::HashSet; use std::hash::Hash; pub struct
<K> { map: Mutex<HashSet<K>>, } pub struct RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { parent: &'a RAIIMutexTable<K>, key: K, } impl<K> RAIIMutexTable<K> where K: Eq + Clone + Hash, { pub fn new() -> RAIIMutexTable<K> { RAIIMutexTable { map: Mutex::new(HashSet::new()), } } pub fn lock(&self, k: K) -> RAIIMutexGuard<K> { loop { let mut map_guard = self.map.lock(); if!map_guard.contains(&k) { map_guard.insert(k.clone()); return RAIIMutexGuard { parent: self, key: k, }; } } } pub fn unlock(&self, key: &K) { let mut map_guard = self.map.lock(); debug_assert!(map_guard.contains(key)); map_guard.remove(key); } } impl<'a, K> Drop for RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { fn drop(&mut self) { self.parent.unlock(&self.key) } }
RAIIMutexTable
identifier_name
raii_mutex_table.rs
use parking_lot::Mutex; use std::cmp::Eq; use std::collections::HashSet; use std::hash::Hash; pub struct RAIIMutexTable<K> { map: Mutex<HashSet<K>>, } pub struct RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { parent: &'a RAIIMutexTable<K>, key: K, } impl<K> RAIIMutexTable<K> where K: Eq + Clone + Hash, { pub fn new() -> RAIIMutexTable<K> { RAIIMutexTable { map: Mutex::new(HashSet::new()), } } pub fn lock(&self, k: K) -> RAIIMutexGuard<K> { loop { let mut map_guard = self.map.lock(); if!map_guard.contains(&k)
} } pub fn unlock(&self, key: &K) { let mut map_guard = self.map.lock(); debug_assert!(map_guard.contains(key)); map_guard.remove(key); } } impl<'a, K> Drop for RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { fn drop(&mut self) { self.parent.unlock(&self.key) } }
{ map_guard.insert(k.clone()); return RAIIMutexGuard { parent: self, key: k, }; }
conditional_block
raii_mutex_table.rs
use parking_lot::Mutex; use std::cmp::Eq; use std::collections::HashSet; use std::hash::Hash; pub struct RAIIMutexTable<K> { map: Mutex<HashSet<K>>, } pub struct RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { parent: &'a RAIIMutexTable<K>, key: K, } impl<K> RAIIMutexTable<K> where K: Eq + Clone + Hash, { pub fn new() -> RAIIMutexTable<K> { RAIIMutexTable { map: Mutex::new(HashSet::new()), } } pub fn lock(&self, k: K) -> RAIIMutexGuard<K> { loop { let mut map_guard = self.map.lock(); if!map_guard.contains(&k) { map_guard.insert(k.clone()); return RAIIMutexGuard { parent: self, key: k, }; } } } pub fn unlock(&self, key: &K) { let mut map_guard = self.map.lock(); debug_assert!(map_guard.contains(key)); map_guard.remove(key); }
where K: Eq + Clone + Hash, { fn drop(&mut self) { self.parent.unlock(&self.key) } }
} impl<'a, K> Drop for RAIIMutexGuard<'a, K>
random_line_split
raii_mutex_table.rs
use parking_lot::Mutex; use std::cmp::Eq; use std::collections::HashSet; use std::hash::Hash; pub struct RAIIMutexTable<K> { map: Mutex<HashSet<K>>, } pub struct RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { parent: &'a RAIIMutexTable<K>, key: K, } impl<K> RAIIMutexTable<K> where K: Eq + Clone + Hash, { pub fn new() -> RAIIMutexTable<K> { RAIIMutexTable { map: Mutex::new(HashSet::new()), } } pub fn lock(&self, k: K) -> RAIIMutexGuard<K> { loop { let mut map_guard = self.map.lock(); if!map_guard.contains(&k) { map_guard.insert(k.clone()); return RAIIMutexGuard { parent: self, key: k, }; } } } pub fn unlock(&self, key: &K) { let mut map_guard = self.map.lock(); debug_assert!(map_guard.contains(key)); map_guard.remove(key); } } impl<'a, K> Drop for RAIIMutexGuard<'a, K> where K: Eq + Clone + Hash, { fn drop(&mut self)
}
{ self.parent.unlock(&self.key) }
identifier_body
mod.rs
use crate::traits::*; use rustc_errors::ErrorReported; use rustc_middle::mir; use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout}; use rustc_middle::ty::{self, Instance, Ty, TypeFoldable}; use rustc_symbol_mangling::typeid_for_fnabi; use rustc_target::abi::call::{FnAbi, PassMode}; use std::iter; use rustc_index::bit_set::BitSet; use rustc_index::vec::IndexVec; use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo}; use self::place::PlaceRef; use rustc_middle::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { instance: Instance<'tcx>, mir: &'tcx mir::Body<'tcx>, debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>, llfn: Bx::Function, cx: &'a Bx::CodegenCx, fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>, /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the /// resume instruction. The personality is (afaik) some kind of /// value used for C++ unwinding, which must filter by type: we /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. personality_slot: Option<PlaceRef<'tcx, Bx::Value>>, /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily /// as-needed (e.g. RPO reaching it or another block branching to it). // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`). cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>, /// The funclet status of each basic block cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>, /// When targeting MSVC, this stores the cleanup info for each funclet BB. /// This is initialized at the same time as the `landing_pads` entry for the /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge. funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>, /// This stores the cached landing/cleanup pad block for a given BB. // FIXME(eddyb) rename this to `eh_pads`. landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>, /// Cached unreachable block unreachable_block: Option<Bx::BasicBlock>, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: /// sometimes we can skip the alloca and just store the value /// directly using an `OperandRef`, which makes for tighter LLVM /// IR. The conditions for using an `OperandRef` are as follows: /// /// - the type of the local must be judged "immediate" by `is_llvm_immediate` /// - the operand must never be referenced indirectly /// - we should not take its address using the `&` operator /// - nor should it appear in a place path like `tmp.a` /// - the operand must be defined by an rvalue that can generate immediate /// values /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>, /// All `VarDebugInfo` from the MIR body, partitioned by `Local`. /// This is `None` if no variable debuginfo/names are needed. per_local_var_debug_info: Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>, /// Caller location propagated if this function has `#[track_caller]`. 
caller_location: Option<OperandRef<'tcx, Bx::Value>>, } impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize<T>(&self, value: T) -> T where T: Copy + TypeFoldable<'tcx>, { debug!("monomorphize: self.instance={:?}", self.instance); self.instance.subst_mir_and_normalize_erasing_regions( self.cx.tcx(), ty::ParamEnv::reveal_all(), value, ) } } enum LocalRef<'tcx, V> { Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect. UnsizedPlace(PlaceRef<'tcx, V>), Operand(Option<OperandRef<'tcx, V>>), } impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> { fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>( bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } } } /////////////////////////////////////////////////////////////////////////// #[instrument(level = "debug", skip(cx))] pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx, instance: Instance<'tcx>, ) { assert!(!instance.substs.needs_infer()); let llfn = cx.get_fn(instance); let mir = cx.tcx().instance_mir(instance.def); let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); debug!("fn_abi: {:?}", fn_abi); let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir); let start_llbb = Bx::append_block(cx, llfn, "start"); let mut bx = Bx::build(cx, start_llbb); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); } let cleanup_kinds = analyze::cleanup_kinds(&mir); let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir .basic_blocks() .indices() .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None }) .collect(); let mut fx = FunctionCx { instance, mir, llfn, fn_abi, cx, personality_slot: None, cached_llbbs, unreachable_block: None, cleanup_kinds, landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()), locals: IndexVec::new(), debug_context, per_local_var_debug_info: None, caller_location: None, }; fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx); // Evaluate all required consts; codegen later assumes that CTFE will never fail. let mut all_consts_ok = true; for const_ in &mir.required_consts { if let Err(err) = fx.eval_mir_constant(const_) { all_consts_ok = false; match err { // errored or at least linted ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {} ErrorHandled::TooGeneric => { span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err) } } } } if!all_consts_ok { // We leave the IR in some half-built state here, and rely on this code not even being // submitted to LLVM once an error was raised. 
return; } let memory_locals = analyze::non_ssa_locals(&fx); // Allocate variable and temp allocas fx.locals = { let args = arg_local_refs(&mut bx, &mut fx, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; let layout = bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions(cx.tcx())); if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); let llretptr = bx.get_param(0); return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); } if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout)) } else { LocalRef::Place(PlaceRef::alloca(&mut bx, layout)) } } else { debug!("alloc: {:?} -> operand", local); LocalRef::new_operand(&mut bx, layout) } }; let retptr = allocate_local(mir::RETURN_PLACE); iter::once(retptr) .chain(args.into_iter()) .chain(mir.vars_and_temps_iter().map(allocate_local)) .collect() }; // Apply debuginfo to the newly allocated locals. fx.debug_introduce_locals(&mut bx); // Codegen the body of each block using reverse postorder // FIXME(eddyb) reuse RPO iterator between `analysis` and this. for (bb, _) in traversal::reverse_postorder(&mir) { fx.codegen_block(bb); } // For backends that support CFI using type membership (i.e., testing whether a given pointer // is associated with a type identifier). if cx.tcx().sess.is_sanitizer_cfi_enabled() { let typeid = typeid_for_fnabi(cx.tcx(), fn_abi); bx.type_metadata(llfn, typeid); } } /// Produces, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &mut FunctionCx<'a, 'tcx, Bx>, memory_locals: &BitSet<mir::Local>, ) -> Vec<LocalRef<'tcx, Bx::Value>> { let mir = fx.mir; let mut idx = 0; let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize; let mut num_untupled = None; let args = mir .args_iter() .enumerate() .map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; if Some(local) == mir.spread_arg { // This argument (e.g., the last argument in the "rust-call" ABI) // is a tuple that was spread at the ABI level and now we have // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. let arg_ty = fx.monomorphize(arg_decl.ty); let tupled_arg_tys = match arg_ty.kind() { ty::Tuple(tys) => tys, _ => bug!("spread argument isn't a tuple?!"), }; let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } let pr_field = place.project_field(bx, i); bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } assert_eq!( None, num_untupled.replace(tupled_arg_tys.len()), "Replaced existing num_tupled" ); return LocalRef::Place(place); } if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() { let arg_ty = fx.monomorphize(arg_decl.ty); let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); bx.va_start(va_list.llval); return LocalRef::Place(va_list); } let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } if!memory_locals.contains(local) { // We don't have to cast or keep the argument in the alloca. // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead // of putting everything in allocas just so we can use llvm.dbg.declare. 
let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { return local(OperandRef::new_zst(bx, arg.layout)); } PassMode::Direct(_) => { let llarg = bx.get_param(llarg_idx); llarg_idx += 1; return local(OperandRef::from_immediate_or_packed_pair( bx, llarg, arg.layout, )); } PassMode::Pair(..) => { let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1)); llarg_idx += 2; return local(OperandRef { val: OperandValue::Pair(a, b), layout: arg.layout, }); } _ => {} } } if arg.is_sized_indirect() { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes let llarg = bx.get_param(llarg_idx); llarg_idx += 1; LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout)) } else if arg.is_unsized_indirect()
else { let tmp = PlaceRef::alloca(bx, arg.layout); bx.store_fn_arg(arg, &mut llarg_idx, tmp); LocalRef::Place(tmp) } }) .collect::<Vec<_>>(); if fx.instance.def.requires_caller_location(bx.tcx()) { let mir_args = if let Some(num_untupled) = num_untupled { // Subtract off the tupled argument that gets 'expanded' args.len() - 1 + num_untupled } else { args.len() }; assert_eq!( fx.fn_abi.args.len(), mir_args + 1, "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR", fx.instance ); let arg = fx.fn_abi.args.last().unwrap(); match arg.mode { PassMode::Direct(_) => (), _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode), } fx.caller_location = Some(OperandRef { val: OperandValue::Immediate(bx.get_param(llarg_idx)), layout: arg.layout, }); } args } mod analyze; mod block; pub mod constant; pub mod coverageinfo; pub mod debuginfo; mod intrinsic; pub mod operand; pub mod place; mod rvalue; mod statement;
{ // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. let llarg = bx.get_param(llarg_idx); llarg_idx += 1; let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout); indirect_operand.store(bx, tmp); LocalRef::UnsizedPlace(tmp) }
conditional_block
mod.rs
use crate::traits::*; use rustc_errors::ErrorReported; use rustc_middle::mir; use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout}; use rustc_middle::ty::{self, Instance, Ty, TypeFoldable}; use rustc_symbol_mangling::typeid_for_fnabi; use rustc_target::abi::call::{FnAbi, PassMode}; use std::iter; use rustc_index::bit_set::BitSet; use rustc_index::vec::IndexVec; use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo}; use self::place::PlaceRef;
use self::operand::{OperandRef, OperandValue};

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,
    mir: &'tcx mir::Body<'tcx>,
    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
    llfn: Bx::Function,
    cx: &'a Bx::CodegenCx,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
    /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
    /// as-needed (e.g. RPO reaching it or another block branching to it).
    // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
    cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
    /// The funclet status of each basic block
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
    /// When targeting MSVC, this stores the cleanup info for each funclet BB.
    /// This is initialized at the same time as the `landing_pads` entry for the
    /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
    /// This stores the cached landing/cleanup pad block for a given BB.
    // FIXME(eddyb) rename this to `eh_pads`.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
    /// Cached unreachable block
    unreachable_block: Option<Bx::BasicBlock>,
    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually an `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    /// - we should not take its address using the `&` operator
    /// - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
    /// Caller location propagated if this function has `#[track_caller]`.
    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn monomorphize<T>(&self, value: T) -> T
    where
        T: Copy + TypeFoldable<'tcx>,
    {
        debug!("monomorphize: self.instance={:?}", self.instance);
        self.instance.subst_mir_and_normalize_erasing_regions(
            self.cx.tcx(),
            ty::ParamEnv::reveal_all(),
            value,
        )
    }
}

enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
/// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect. UnsizedPlace(PlaceRef<'tcx, V>), Operand(Option<OperandRef<'tcx, V>>), } impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> { fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>( bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } } } /////////////////////////////////////////////////////////////////////////// #[instrument(level = "debug", skip(cx))] pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx, instance: Instance<'tcx>, ) { assert!(!instance.substs.needs_infer()); let llfn = cx.get_fn(instance); let mir = cx.tcx().instance_mir(instance.def); let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); debug!("fn_abi: {:?}", fn_abi); let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir); let start_llbb = Bx::append_block(cx, llfn, "start"); let mut bx = Bx::build(cx, start_llbb); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); } let cleanup_kinds = analyze::cleanup_kinds(&mir); let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir .basic_blocks() .indices() .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None }) .collect(); let mut fx = FunctionCx { instance, mir, llfn, fn_abi, cx, personality_slot: None, cached_llbbs, unreachable_block: None, cleanup_kinds, landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()), locals: IndexVec::new(), debug_context, per_local_var_debug_info: None, caller_location: None, }; fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx); // Evaluate all required consts; codegen later assumes that CTFE will never fail. let mut all_consts_ok = true; for const_ in &mir.required_consts { if let Err(err) = fx.eval_mir_constant(const_) { all_consts_ok = false; match err { // errored or at least linted ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {} ErrorHandled::TooGeneric => { span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err) } } } } if!all_consts_ok { // We leave the IR in some half-built state here, and rely on this code not even being // submitted to LLVM once an error was raised. 
return; } let memory_locals = analyze::non_ssa_locals(&fx); // Allocate variable and temp allocas fx.locals = { let args = arg_local_refs(&mut bx, &mut fx, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; let layout = bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions(cx.tcx())); if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); let llretptr = bx.get_param(0); return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); } if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout)) } else { LocalRef::Place(PlaceRef::alloca(&mut bx, layout)) } } else { debug!("alloc: {:?} -> operand", local); LocalRef::new_operand(&mut bx, layout) } }; let retptr = allocate_local(mir::RETURN_PLACE); iter::once(retptr) .chain(args.into_iter()) .chain(mir.vars_and_temps_iter().map(allocate_local)) .collect() }; // Apply debuginfo to the newly allocated locals. fx.debug_introduce_locals(&mut bx); // Codegen the body of each block using reverse postorder // FIXME(eddyb) reuse RPO iterator between `analysis` and this. for (bb, _) in traversal::reverse_postorder(&mir) { fx.codegen_block(bb); } // For backends that support CFI using type membership (i.e., testing whether a given pointer // is associated with a type identifier). if cx.tcx().sess.is_sanitizer_cfi_enabled() { let typeid = typeid_for_fnabi(cx.tcx(), fn_abi); bx.type_metadata(llfn, typeid); } } /// Produces, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &mut FunctionCx<'a, 'tcx, Bx>, memory_locals: &BitSet<mir::Local>, ) -> Vec<LocalRef<'tcx, Bx::Value>> { let mir = fx.mir; let mut idx = 0; let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize; let mut num_untupled = None; let args = mir .args_iter() .enumerate() .map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; if Some(local) == mir.spread_arg { // This argument (e.g., the last argument in the "rust-call" ABI) // is a tuple that was spread at the ABI level and now we have // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. let arg_ty = fx.monomorphize(arg_decl.ty); let tupled_arg_tys = match arg_ty.kind() { ty::Tuple(tys) => tys, _ => bug!("spread argument isn't a tuple?!"), }; let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } let pr_field = place.project_field(bx, i); bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } assert_eq!( None, num_untupled.replace(tupled_arg_tys.len()), "Replaced existing num_tupled" ); return LocalRef::Place(place); } if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() { let arg_ty = fx.monomorphize(arg_decl.ty); let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); bx.va_start(va_list.llval); return LocalRef::Place(va_list); } let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } if!memory_locals.contains(local) { // We don't have to cast or keep the argument in the alloca. // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead // of putting everything in allocas just so we can use llvm.dbg.declare. 
let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { return local(OperandRef::new_zst(bx, arg.layout)); } PassMode::Direct(_) => { let llarg = bx.get_param(llarg_idx); llarg_idx += 1; return local(OperandRef::from_immediate_or_packed_pair( bx, llarg, arg.layout, )); } PassMode::Pair(..) => { let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1)); llarg_idx += 2; return local(OperandRef { val: OperandValue::Pair(a, b), layout: arg.layout, }); } _ => {} } } if arg.is_sized_indirect() { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes let llarg = bx.get_param(llarg_idx); llarg_idx += 1; LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout)) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. let llarg = bx.get_param(llarg_idx); llarg_idx += 1; let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout); indirect_operand.store(bx, tmp); LocalRef::UnsizedPlace(tmp) } else { let tmp = PlaceRef::alloca(bx, arg.layout); bx.store_fn_arg(arg, &mut llarg_idx, tmp); LocalRef::Place(tmp) } }) .collect::<Vec<_>>(); if fx.instance.def.requires_caller_location(bx.tcx()) { let mir_args = if let Some(num_untupled) = num_untupled { // Subtract off the tupled argument that gets 'expanded' args.len() - 1 + num_untupled } else { args.len() }; assert_eq!( fx.fn_abi.args.len(), mir_args + 1, "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR", fx.instance ); let arg = fx.fn_abi.args.last().unwrap(); match arg.mode { PassMode::Direct(_) => (), _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode), } fx.caller_location = Some(OperandRef { val: OperandValue::Immediate(bx.get_param(llarg_idx)), layout: arg.layout, }); } args } mod analyze; mod block; pub mod constant; pub mod coverageinfo; pub mod debuginfo; mod intrinsic; pub mod operand; pub mod place; mod rvalue; mod statement;
use rustc_middle::mir::traversal;
random_line_split
mod.rs
use crate::traits::*;
use rustc_errors::ErrorReported;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_symbol_mangling::typeid_for_fnabi;
use rustc_target::abi::call::{FnAbi, PassMode};
use std::iter;

use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;

use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::place::PlaceRef;
use rustc_middle::mir::traversal;

use self::operand::{OperandRef, OperandValue};

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,
    mir: &'tcx mir::Body<'tcx>,
    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
    llfn: Bx::Function,
    cx: &'a Bx::CodegenCx,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
    /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
    /// as-needed (e.g. RPO reaching it or another block branching to it).
    // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
    cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
    /// The funclet status of each basic block
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
    /// When targeting MSVC, this stores the cleanup info for each funclet BB.
    /// This is initialized at the same time as the `landing_pads` entry for the
    /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
    /// This stores the cached landing/cleanup pad block for a given BB.
    // FIXME(eddyb) rename this to `eh_pads`.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
    /// Cached unreachable block
    unreachable_block: Option<Bx::BasicBlock>,
    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually an `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    /// - we should not take its address using the `&` operator
    /// - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
    /// Caller location propagated if this function has `#[track_caller]`.
caller_location: Option<OperandRef<'tcx, Bx::Value>>, } impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize<T>(&self, value: T) -> T where T: Copy + TypeFoldable<'tcx>, { debug!("monomorphize: self.instance={:?}", self.instance); self.instance.subst_mir_and_normalize_erasing_regions( self.cx.tcx(), ty::ParamEnv::reveal_all(), value, ) } } enum
<'tcx, V> { Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect. UnsizedPlace(PlaceRef<'tcx, V>), Operand(Option<OperandRef<'tcx, V>>), } impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> { fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>( bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } } } /////////////////////////////////////////////////////////////////////////// #[instrument(level = "debug", skip(cx))] pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx, instance: Instance<'tcx>, ) { assert!(!instance.substs.needs_infer()); let llfn = cx.get_fn(instance); let mir = cx.tcx().instance_mir(instance.def); let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); debug!("fn_abi: {:?}", fn_abi); let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir); let start_llbb = Bx::append_block(cx, llfn, "start"); let mut bx = Bx::build(cx, start_llbb); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); } let cleanup_kinds = analyze::cleanup_kinds(&mir); let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir .basic_blocks() .indices() .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None }) .collect(); let mut fx = FunctionCx { instance, mir, llfn, fn_abi, cx, personality_slot: None, cached_llbbs, unreachable_block: None, cleanup_kinds, landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()), locals: IndexVec::new(), debug_context, per_local_var_debug_info: None, caller_location: None, }; fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx); // Evaluate all required consts; codegen later assumes that CTFE will never fail. let mut all_consts_ok = true; for const_ in &mir.required_consts { if let Err(err) = fx.eval_mir_constant(const_) { all_consts_ok = false; match err { // errored or at least linted ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {} ErrorHandled::TooGeneric => { span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err) } } } } if!all_consts_ok { // We leave the IR in some half-built state here, and rely on this code not even being // submitted to LLVM once an error was raised. 
return; } let memory_locals = analyze::non_ssa_locals(&fx); // Allocate variable and temp allocas fx.locals = { let args = arg_local_refs(&mut bx, &mut fx, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; let layout = bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions(cx.tcx())); if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); let llretptr = bx.get_param(0); return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); } if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout)) } else { LocalRef::Place(PlaceRef::alloca(&mut bx, layout)) } } else { debug!("alloc: {:?} -> operand", local); LocalRef::new_operand(&mut bx, layout) } }; let retptr = allocate_local(mir::RETURN_PLACE); iter::once(retptr) .chain(args.into_iter()) .chain(mir.vars_and_temps_iter().map(allocate_local)) .collect() }; // Apply debuginfo to the newly allocated locals. fx.debug_introduce_locals(&mut bx); // Codegen the body of each block using reverse postorder // FIXME(eddyb) reuse RPO iterator between `analysis` and this. for (bb, _) in traversal::reverse_postorder(&mir) { fx.codegen_block(bb); } // For backends that support CFI using type membership (i.e., testing whether a given pointer // is associated with a type identifier). if cx.tcx().sess.is_sanitizer_cfi_enabled() { let typeid = typeid_for_fnabi(cx.tcx(), fn_abi); bx.type_metadata(llfn, typeid); } } /// Produces, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &mut FunctionCx<'a, 'tcx, Bx>, memory_locals: &BitSet<mir::Local>, ) -> Vec<LocalRef<'tcx, Bx::Value>> { let mir = fx.mir; let mut idx = 0; let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize; let mut num_untupled = None; let args = mir .args_iter() .enumerate() .map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; if Some(local) == mir.spread_arg { // This argument (e.g., the last argument in the "rust-call" ABI) // is a tuple that was spread at the ABI level and now we have // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. let arg_ty = fx.monomorphize(arg_decl.ty); let tupled_arg_tys = match arg_ty.kind() { ty::Tuple(tys) => tys, _ => bug!("spread argument isn't a tuple?!"), }; let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } let pr_field = place.project_field(bx, i); bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } assert_eq!( None, num_untupled.replace(tupled_arg_tys.len()), "Replaced existing num_tupled" ); return LocalRef::Place(place); } if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() { let arg_ty = fx.monomorphize(arg_decl.ty); let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); bx.va_start(va_list.llval); return LocalRef::Place(va_list); } let arg = &fx.fn_abi.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } if!memory_locals.contains(local) { // We don't have to cast or keep the argument in the alloca. // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead // of putting everything in allocas just so we can use llvm.dbg.declare. 
let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { return local(OperandRef::new_zst(bx, arg.layout)); } PassMode::Direct(_) => { let llarg = bx.get_param(llarg_idx); llarg_idx += 1; return local(OperandRef::from_immediate_or_packed_pair( bx, llarg, arg.layout, )); } PassMode::Pair(..) => { let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1)); llarg_idx += 2; return local(OperandRef { val: OperandValue::Pair(a, b), layout: arg.layout, }); } _ => {} } } if arg.is_sized_indirect() { // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes let llarg = bx.get_param(llarg_idx); llarg_idx += 1; LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout)) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. let llarg = bx.get_param(llarg_idx); llarg_idx += 1; let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout); indirect_operand.store(bx, tmp); LocalRef::UnsizedPlace(tmp) } else { let tmp = PlaceRef::alloca(bx, arg.layout); bx.store_fn_arg(arg, &mut llarg_idx, tmp); LocalRef::Place(tmp) } }) .collect::<Vec<_>>(); if fx.instance.def.requires_caller_location(bx.tcx()) { let mir_args = if let Some(num_untupled) = num_untupled { // Subtract off the tupled argument that gets 'expanded' args.len() - 1 + num_untupled } else { args.len() }; assert_eq!( fx.fn_abi.args.len(), mir_args + 1, "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR", fx.instance ); let arg = fx.fn_abi.args.last().unwrap(); match arg.mode { PassMode::Direct(_) => (), _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode), } fx.caller_location = Some(OperandRef { val: OperandValue::Immediate(bx.get_param(llarg_idx)), layout: arg.layout, }); } args } mod analyze; mod block; pub mod constant; pub mod coverageinfo; pub mod debuginfo; mod intrinsic; pub mod operand; pub mod place; mod rvalue; mod statement;
LocalRef
identifier_name
local_data.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Task local data management Allows storing boxes with arbitrary types inside, to be accessed anywhere within a task, keyed by a pointer to a global finaliser function. Useful for dynamic variables, singletons, and interfacing with foreign code with bad callback interfaces. To use, declare a monomorphic global function at the type to store, and use it as the 'key' when accessing. See the 'tls' tests below for examples. Casting 'Arcane Sight' reveals an overwhelming aura of Transmutation magic. */ use prelude::*; use task::local_data_priv::{local_get, local_pop, local_modify, local_set, Handle}; /** * Indexes a task-local data slot. The function's code pointer is used for * comparison. Recommended use is to write an empty function for each desired * task-local data slot (and use class destructors, not code inside the * function, if specific teardown is needed). DO NOT use multiple * instantiations of a single polymorphic function to index data of different * types; arbitrary type coercion is possible this way. * * One other exception is that this global state can be used in a destructor * context to create a circular @-box reference, which will crash during task * failure (see issue #3039). * * These two cases aside, the interface is safe. */ pub type LocalDataKey<'self,T> = &'self fn(v: @T); /** * Remove a task-local data value from the table, returning the * reference that was originally created to insert it. */ pub unsafe fn local_data_pop<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_pop(Handle::new(), key) } /** * Retrieve a task-local data value. It will also be kept alive in the * table until explicitly removed. */ pub unsafe fn local_data_get<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_get(Handle::new(), key) } /** * Store a value in task-local data. If this key already has a value, * that value is overwritten (and its destructor is run). */ pub unsafe fn local_data_set<T:'static>( key: LocalDataKey<T>, data: @T) { local_set(Handle::new(), key, data) } /** * Modify a task-local data value. If the function returns 'None', the * data is removed (and its reference dropped). */ pub unsafe fn local_data_modify<T:'static>( key: LocalDataKey<T>, modify_fn: &fn(Option<@T>) -> Option<@T>) { local_modify(Handle::new(), key, modify_fn) } #[test] fn test_tls_multitask() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"parent data"); do task::spawn { unsafe { // TLS shouldn't carry over. assert!(local_data_get(my_key).is_none()); local_data_set(my_key, @~"child data"); assert!(*(local_data_get(my_key).get()) == ~"child data"); // should be cleaned up for us
assert!(*(local_data_get(my_key).get()) == ~"parent data"); assert!(*(local_data_get(my_key).get()) == ~"parent data"); } } #[test] fn test_tls_overwrite() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"first data"); local_data_set(my_key, @~"next data"); // Shouldn't leak. assert!(*(local_data_get(my_key).get()) == ~"next data"); } } #[test] fn test_tls_pop() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"weasel"); assert!(*(local_data_pop(my_key).get()) == ~"weasel"); // Pop must remove the data from the map. assert!(local_data_pop(my_key).is_none()); } } #[test] fn test_tls_modify() { unsafe { fn my_key(_x: @~str) { } local_data_modify(my_key, |data| { match data { Some(@ref val) => fail!("unwelcome value: %s", *val), None => Some(@~"first data") } }); local_data_modify(my_key, |data| { match data { Some(@~"first data") => Some(@~"next data"), Some(@ref val) => fail!("wrong value: %s", *val), None => fail!("missing value") } }); assert!(*(local_data_pop(my_key).get()) == ~"next data"); } } #[test] fn test_tls_crust_automorestack_memorial_bug() { // This might result in a stack-canary clobber if the runtime fails to // set sp_limit to 0 when calling the cleanup extern - it might // automatically jump over to the rust stack, which causes next_c_sp // to get recorded as something within a rust stack segment. Then a // subsequent upcall (esp. for logging, think vsnprintf) would run on // a stack smaller than 1 MB. fn my_key(_x: @~str) { } do task::spawn { unsafe { local_data_set(my_key, @~"hax"); } } } #[test] fn test_tls_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); } } } #[test] fn test_tls_overwrite_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(int_key, @42); // This could cause a segfault if overwriting-destruction is done // with the crazy polymorphic transmute rather than the provided // finaliser. local_data_set(int_key, @31337); } } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_tls_cleanup_on_failure() { unsafe { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } local_data_set(str_key, @~"parent data"); local_data_set(box_key, @@()); do task::spawn { unsafe { // spawn_linked local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); fail!(); } } // Not quite nondeterministic. local_data_set(int_key, @31337); fail!(); } } #[test] fn test_static_pointer() { unsafe { fn key(_x: @&'static int) { } static VALUE: int = 0; local_data_set(key, @&VALUE); } }
} } // Must work multiple times assert!(*(local_data_get(my_key).get()) == ~"parent data");
random_line_split
local_data.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Task local data management Allows storing boxes with arbitrary types inside, to be accessed anywhere within a task, keyed by a pointer to a global finaliser function. Useful for dynamic variables, singletons, and interfacing with foreign code with bad callback interfaces. To use, declare a monomorphic global function at the type to store, and use it as the 'key' when accessing. See the 'tls' tests below for examples. Casting 'Arcane Sight' reveals an overwhelming aura of Transmutation magic. */ use prelude::*; use task::local_data_priv::{local_get, local_pop, local_modify, local_set, Handle}; /** * Indexes a task-local data slot. The function's code pointer is used for * comparison. Recommended use is to write an empty function for each desired * task-local data slot (and use class destructors, not code inside the * function, if specific teardown is needed). DO NOT use multiple * instantiations of a single polymorphic function to index data of different * types; arbitrary type coercion is possible this way. * * One other exception is that this global state can be used in a destructor * context to create a circular @-box reference, which will crash during task * failure (see issue #3039). * * These two cases aside, the interface is safe. */ pub type LocalDataKey<'self,T> = &'self fn(v: @T); /** * Remove a task-local data value from the table, returning the * reference that was originally created to insert it. */ pub unsafe fn local_data_pop<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_pop(Handle::new(), key) } /** * Retrieve a task-local data value. It will also be kept alive in the * table until explicitly removed. */ pub unsafe fn local_data_get<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_get(Handle::new(), key) } /** * Store a value in task-local data. If this key already has a value, * that value is overwritten (and its destructor is run). */ pub unsafe fn local_data_set<T:'static>( key: LocalDataKey<T>, data: @T) { local_set(Handle::new(), key, data) } /** * Modify a task-local data value. If the function returns 'None', the * data is removed (and its reference dropped). */ pub unsafe fn local_data_modify<T:'static>( key: LocalDataKey<T>, modify_fn: &fn(Option<@T>) -> Option<@T>) { local_modify(Handle::new(), key, modify_fn) } #[test] fn test_tls_multitask() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"parent data"); do task::spawn { unsafe { // TLS shouldn't carry over. assert!(local_data_get(my_key).is_none()); local_data_set(my_key, @~"child data"); assert!(*(local_data_get(my_key).get()) == ~"child data"); // should be cleaned up for us } } // Must work multiple times assert!(*(local_data_get(my_key).get()) == ~"parent data"); assert!(*(local_data_get(my_key).get()) == ~"parent data"); assert!(*(local_data_get(my_key).get()) == ~"parent data"); } } #[test] fn test_tls_overwrite() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"first data"); local_data_set(my_key, @~"next data"); // Shouldn't leak. assert!(*(local_data_get(my_key).get()) == ~"next data"); } } #[test] fn
() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"weasel"); assert!(*(local_data_pop(my_key).get()) == ~"weasel"); // Pop must remove the data from the map. assert!(local_data_pop(my_key).is_none()); } } #[test] fn test_tls_modify() { unsafe { fn my_key(_x: @~str) { } local_data_modify(my_key, |data| { match data { Some(@ref val) => fail!("unwelcome value: %s", *val), None => Some(@~"first data") } }); local_data_modify(my_key, |data| { match data { Some(@~"first data") => Some(@~"next data"), Some(@ref val) => fail!("wrong value: %s", *val), None => fail!("missing value") } }); assert!(*(local_data_pop(my_key).get()) == ~"next data"); } } #[test] fn test_tls_crust_automorestack_memorial_bug() { // This might result in a stack-canary clobber if the runtime fails to // set sp_limit to 0 when calling the cleanup extern - it might // automatically jump over to the rust stack, which causes next_c_sp // to get recorded as something within a rust stack segment. Then a // subsequent upcall (esp. for logging, think vsnprintf) would run on // a stack smaller than 1 MB. fn my_key(_x: @~str) { } do task::spawn { unsafe { local_data_set(my_key, @~"hax"); } } } #[test] fn test_tls_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); } } } #[test] fn test_tls_overwrite_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(int_key, @42); // This could cause a segfault if overwriting-destruction is done // with the crazy polymorphic transmute rather than the provided // finaliser. local_data_set(int_key, @31337); } } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_tls_cleanup_on_failure() { unsafe { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } local_data_set(str_key, @~"parent data"); local_data_set(box_key, @@()); do task::spawn { unsafe { // spawn_linked local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); fail!(); } } // Not quite nondeterministic. local_data_set(int_key, @31337); fail!(); } } #[test] fn test_static_pointer() { unsafe { fn key(_x: @&'static int) { } static VALUE: int = 0; local_data_set(key, @&VALUE); } }
test_tls_pop
identifier_name
local_data.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Task local data management Allows storing boxes with arbitrary types inside, to be accessed anywhere within a task, keyed by a pointer to a global finaliser function. Useful for dynamic variables, singletons, and interfacing with foreign code with bad callback interfaces. To use, declare a monomorphic global function at the type to store, and use it as the 'key' when accessing. See the 'tls' tests below for examples. Casting 'Arcane Sight' reveals an overwhelming aura of Transmutation magic. */ use prelude::*; use task::local_data_priv::{local_get, local_pop, local_modify, local_set, Handle}; /** * Indexes a task-local data slot. The function's code pointer is used for * comparison. Recommended use is to write an empty function for each desired * task-local data slot (and use class destructors, not code inside the * function, if specific teardown is needed). DO NOT use multiple * instantiations of a single polymorphic function to index data of different * types; arbitrary type coercion is possible this way. * * One other exception is that this global state can be used in a destructor * context to create a circular @-box reference, which will crash during task * failure (see issue #3039). * * These two cases aside, the interface is safe. */ pub type LocalDataKey<'self,T> = &'self fn(v: @T); /** * Remove a task-local data value from the table, returning the * reference that was originally created to insert it. */ pub unsafe fn local_data_pop<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_pop(Handle::new(), key) } /** * Retrieve a task-local data value. It will also be kept alive in the * table until explicitly removed. */ pub unsafe fn local_data_get<T:'static>( key: LocalDataKey<T>) -> Option<@T> { local_get(Handle::new(), key) } /** * Store a value in task-local data. If this key already has a value, * that value is overwritten (and its destructor is run). */ pub unsafe fn local_data_set<T:'static>( key: LocalDataKey<T>, data: @T)
/** * Modify a task-local data value. If the function returns 'None', the * data is removed (and its reference dropped). */ pub unsafe fn local_data_modify<T:'static>( key: LocalDataKey<T>, modify_fn: &fn(Option<@T>) -> Option<@T>) { local_modify(Handle::new(), key, modify_fn) } #[test] fn test_tls_multitask() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"parent data"); do task::spawn { unsafe { // TLS shouldn't carry over. assert!(local_data_get(my_key).is_none()); local_data_set(my_key, @~"child data"); assert!(*(local_data_get(my_key).get()) == ~"child data"); // should be cleaned up for us } } // Must work multiple times assert!(*(local_data_get(my_key).get()) == ~"parent data"); assert!(*(local_data_get(my_key).get()) == ~"parent data"); assert!(*(local_data_get(my_key).get()) == ~"parent data"); } } #[test] fn test_tls_overwrite() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"first data"); local_data_set(my_key, @~"next data"); // Shouldn't leak. assert!(*(local_data_get(my_key).get()) == ~"next data"); } } #[test] fn test_tls_pop() { unsafe { fn my_key(_x: @~str) { } local_data_set(my_key, @~"weasel"); assert!(*(local_data_pop(my_key).get()) == ~"weasel"); // Pop must remove the data from the map. assert!(local_data_pop(my_key).is_none()); } } #[test] fn test_tls_modify() { unsafe { fn my_key(_x: @~str) { } local_data_modify(my_key, |data| { match data { Some(@ref val) => fail!("unwelcome value: %s", *val), None => Some(@~"first data") } }); local_data_modify(my_key, |data| { match data { Some(@~"first data") => Some(@~"next data"), Some(@ref val) => fail!("wrong value: %s", *val), None => fail!("missing value") } }); assert!(*(local_data_pop(my_key).get()) == ~"next data"); } } #[test] fn test_tls_crust_automorestack_memorial_bug() { // This might result in a stack-canary clobber if the runtime fails to // set sp_limit to 0 when calling the cleanup extern - it might // automatically jump over to the rust stack, which causes next_c_sp // to get recorded as something within a rust stack segment. Then a // subsequent upcall (esp. for logging, think vsnprintf) would run on // a stack smaller than 1 MB. fn my_key(_x: @~str) { } do task::spawn { unsafe { local_data_set(my_key, @~"hax"); } } } #[test] fn test_tls_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); } } } #[test] fn test_tls_overwrite_multiple_types() { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } do task::spawn { unsafe { local_data_set(str_key, @~"string data"); local_data_set(int_key, @42); // This could cause a segfault if overwriting-destruction is done // with the crazy polymorphic transmute rather than the provided // finaliser. local_data_set(int_key, @31337); } } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_tls_cleanup_on_failure() { unsafe { fn str_key(_x: @~str) { } fn box_key(_x: @@()) { } fn int_key(_x: @int) { } local_data_set(str_key, @~"parent data"); local_data_set(box_key, @@()); do task::spawn { unsafe { // spawn_linked local_data_set(str_key, @~"string data"); local_data_set(box_key, @@()); local_data_set(int_key, @42); fail!(); } } // Not quite nondeterministic. local_data_set(int_key, @31337); fail!(); } } #[test] fn test_static_pointer() { unsafe { fn key(_x: @&'static int) { } static VALUE: int = 0; local_data_set(key, @&VALUE); } }
{ local_set(Handle::new(), key, data) }
identifier_body
csssupportsrule.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, ParserInput}; use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding; use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use dom::bindings::reflector::{DomObject, reflect_dom_object}; use dom::bindings::root::DomRoot; use dom::bindings::str::DOMString; use dom::cssconditionrule::CSSConditionRule; use dom::cssrule::SpecificCSSRule; use dom::cssstylesheet::CSSStyleSheet; use dom::window::Window; use dom_struct::dom_struct; use servo_arc::Arc; use style::parser::ParserContext; use style::shared_lock::{Locked, ToCssWithGuard}; use style::stylesheets::{CssRuleType, SupportsRule}; use style::stylesheets::supports_rule::SupportsCondition; use style_traits::{ParsingMode, ToCss}; #[dom_struct] pub struct CSSSupportsRule { cssconditionrule: CSSConditionRule, #[ignore_malloc_size_of = "Arc"] supportsrule: Arc<Locked<SupportsRule>>, } impl CSSSupportsRule { fn new_inherited( parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> CSSSupportsRule { let guard = parent_stylesheet.shared_lock().read(); let list = supportsrule.read_with(&guard).rules.clone(); CSSSupportsRule { cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list), supportsrule: supportsrule, } } #[allow(unrooted_must_root)] pub fn new( window: &Window, parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> DomRoot<CSSSupportsRule> { reflect_dom_object( Box::new(CSSSupportsRule::new_inherited( parent_stylesheet, supportsrule, )), window, CSSSupportsRuleBinding::Wrap, ) } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn get_condition_text(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); let rule = self.supportsrule.read_with(&guard); rule.condition.to_css_string().into() } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn set_condition_text(&self, text: DOMString) { let mut input = ParserInput::new(&text); let mut input = Parser::new(&mut input); let cond = SupportsCondition::parse(&mut input); if let Ok(cond) = cond { let global = self.global(); let win = global.as_window(); let url = win.Document().url(); let quirks_mode = win.Document().quirks_mode(); let context = ParserContext::new_for_cssom( &url, Some(CssRuleType::Supports), ParsingMode::DEFAULT, quirks_mode, None, None, ); let enabled = { let namespaces = self .cssconditionrule .parent_stylesheet() .style_stylesheet() .contents .namespaces .read(); cond.eval(&context, &namespaces) }; let mut guard = self.cssconditionrule.shared_lock().write(); let rule = self.supportsrule.write_with(&mut guard); rule.condition = cond; rule.enabled = enabled; } } } impl SpecificCSSRule for CSSSupportsRule { fn ty(&self) -> u16 { use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants; CSSRuleConstants::SUPPORTS_RULE } fn
(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); self.supportsrule .read_with(&guard) .to_css_string(&guard) .into() } }
get_css
identifier_name
csssupportsrule.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, ParserInput}; use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding; use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use dom::bindings::reflector::{DomObject, reflect_dom_object}; use dom::bindings::root::DomRoot; use dom::bindings::str::DOMString; use dom::cssconditionrule::CSSConditionRule; use dom::cssrule::SpecificCSSRule; use dom::cssstylesheet::CSSStyleSheet; use dom::window::Window; use dom_struct::dom_struct; use servo_arc::Arc; use style::parser::ParserContext; use style::shared_lock::{Locked, ToCssWithGuard}; use style::stylesheets::{CssRuleType, SupportsRule}; use style::stylesheets::supports_rule::SupportsCondition; use style_traits::{ParsingMode, ToCss}; #[dom_struct] pub struct CSSSupportsRule { cssconditionrule: CSSConditionRule, #[ignore_malloc_size_of = "Arc"] supportsrule: Arc<Locked<SupportsRule>>, } impl CSSSupportsRule { fn new_inherited( parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> CSSSupportsRule { let guard = parent_stylesheet.shared_lock().read(); let list = supportsrule.read_with(&guard).rules.clone(); CSSSupportsRule { cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list), supportsrule: supportsrule, } } #[allow(unrooted_must_root)] pub fn new( window: &Window, parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> DomRoot<CSSSupportsRule> { reflect_dom_object( Box::new(CSSSupportsRule::new_inherited( parent_stylesheet, supportsrule, )), window, CSSSupportsRuleBinding::Wrap, ) } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn get_condition_text(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); let rule = self.supportsrule.read_with(&guard); rule.condition.to_css_string().into() } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn set_condition_text(&self, text: DOMString) { let mut input = ParserInput::new(&text); let mut input = Parser::new(&mut input); let cond = SupportsCondition::parse(&mut input); if let Ok(cond) = cond { let global = self.global(); let win = global.as_window(); let url = win.Document().url(); let quirks_mode = win.Document().quirks_mode(); let context = ParserContext::new_for_cssom( &url, Some(CssRuleType::Supports), ParsingMode::DEFAULT, quirks_mode, None, None, ); let enabled = { let namespaces = self .cssconditionrule .parent_stylesheet() .style_stylesheet() .contents .namespaces .read(); cond.eval(&context, &namespaces) }; let mut guard = self.cssconditionrule.shared_lock().write(); let rule = self.supportsrule.write_with(&mut guard); rule.condition = cond; rule.enabled = enabled;
} } impl SpecificCSSRule for CSSSupportsRule { fn ty(&self) -> u16 { use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants; CSSRuleConstants::SUPPORTS_RULE } fn get_css(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); self.supportsrule .read_with(&guard) .to_css_string(&guard) .into() } }
}
random_line_split
csssupportsrule.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::{Parser, ParserInput}; use dom::bindings::codegen::Bindings::CSSSupportsRuleBinding; use dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use dom::bindings::reflector::{DomObject, reflect_dom_object}; use dom::bindings::root::DomRoot; use dom::bindings::str::DOMString; use dom::cssconditionrule::CSSConditionRule; use dom::cssrule::SpecificCSSRule; use dom::cssstylesheet::CSSStyleSheet; use dom::window::Window; use dom_struct::dom_struct; use servo_arc::Arc; use style::parser::ParserContext; use style::shared_lock::{Locked, ToCssWithGuard}; use style::stylesheets::{CssRuleType, SupportsRule}; use style::stylesheets::supports_rule::SupportsCondition; use style_traits::{ParsingMode, ToCss}; #[dom_struct] pub struct CSSSupportsRule { cssconditionrule: CSSConditionRule, #[ignore_malloc_size_of = "Arc"] supportsrule: Arc<Locked<SupportsRule>>, } impl CSSSupportsRule { fn new_inherited( parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> CSSSupportsRule { let guard = parent_stylesheet.shared_lock().read(); let list = supportsrule.read_with(&guard).rules.clone(); CSSSupportsRule { cssconditionrule: CSSConditionRule::new_inherited(parent_stylesheet, list), supportsrule: supportsrule, } } #[allow(unrooted_must_root)] pub fn new( window: &Window, parent_stylesheet: &CSSStyleSheet, supportsrule: Arc<Locked<SupportsRule>>, ) -> DomRoot<CSSSupportsRule> { reflect_dom_object( Box::new(CSSSupportsRule::new_inherited( parent_stylesheet, supportsrule, )), window, CSSSupportsRuleBinding::Wrap, ) } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn get_condition_text(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); let rule = self.supportsrule.read_with(&guard); rule.condition.to_css_string().into() } /// <https://drafts.csswg.org/css-conditional-3/#the-csssupportsrule-interface> pub fn set_condition_text(&self, text: DOMString) { let mut input = ParserInput::new(&text); let mut input = Parser::new(&mut input); let cond = SupportsCondition::parse(&mut input); if let Ok(cond) = cond
.namespaces .read(); cond.eval(&context, &namespaces) }; let mut guard = self.cssconditionrule.shared_lock().write(); let rule = self.supportsrule.write_with(&mut guard); rule.condition = cond; rule.enabled = enabled; } } } impl SpecificCSSRule for CSSSupportsRule { fn ty(&self) -> u16 { use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants; CSSRuleConstants::SUPPORTS_RULE } fn get_css(&self) -> DOMString { let guard = self.cssconditionrule.shared_lock().read(); self.supportsrule .read_with(&guard) .to_css_string(&guard) .into() } }
{ let global = self.global(); let win = global.as_window(); let url = win.Document().url(); let quirks_mode = win.Document().quirks_mode(); let context = ParserContext::new_for_cssom( &url, Some(CssRuleType::Supports), ParsingMode::DEFAULT, quirks_mode, None, None, ); let enabled = { let namespaces = self .cssconditionrule .parent_stylesheet() .style_stylesheet() .contents
conditional_block
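A minimal sketch of how the fields in each record above relate, assuming the standard fill-in-the-middle convention that concatenating prefix, middle, and suffix reproduces the original source file. The `reassemble` helper and the abbreviated literals below are hypothetical illustrations, not part of the dataset tooling, and the full field values are elided.

// Hypothetical sketch: `reassemble` is not dataset tooling; it only illustrates
// the assumed FIM convention that prefix + middle + suffix == original text.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    out.push_str(prefix);
    out.push_str(middle);
    out.push_str(suffix);
    out
}

fn main() {
    // Abbreviated stand-ins for the identifier_name record above (not the full field values).
    let prefix = "    fn ";
    let middle = "get_css";
    let suffix = "(&self) -> DOMString { /* ... */ }";
    assert_eq!(
        reassemble(prefix, middle, suffix),
        "    fn get_css(&self) -> DOMString { /* ... */ }"
    );
}

Under that assumption, the fim_type label only describes where the middle span was cut, not how the pieces are joined: the records here suggest identifier_name masks a single identifier (get_css), random_line_split masks a randomly chosen run of lines, and conditional_block masks the body of an if block.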