file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
getat_func.rs
|
//! # getat_func
//!
//! Split function which returns only the requested item from the created array.
//!
#[cfg(test)]
#[path = "getat_func_test.rs"]
mod getat_func_test;
use envmnt;
pub(crate) fn
|
(function_args: &Vec<String>) -> Vec<String> {
if function_args.len() != 3 {
error!(
"split expects only 3 arguments (environment variable name, split by character, index)"
);
}
let env_key = function_args[0].clone();
let split_by = function_args[1].clone();
let index: usize = match function_args[2].parse() {
Ok(value) => value,
Err(error) => {
error!("Invalid index value: {}", &error);
return vec![]; // should not get here
}
};
if split_by.len() != 1 {
error!("split expects a single character separator");
}
let split_by_char = split_by.chars().next().unwrap();
let value = envmnt::get_or(&env_key, "");
if value.len() > index {
let splitted = value.split(split_by_char);
let splitted_vec: Vec<String> = splitted.map(|str_value| str_value.to_string()).collect();
let value = splitted_vec[index].clone();
vec![value]
} else {
vec![]
}
}
|
invoke
|
identifier_name
|
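Note on the row above: it is a fill-in-the-middle (FIM) example. The `middle` cell (`invoke`) is the span masked out between `prefix` (which ends at `pub(crate) fn`) and `suffix` (which begins at the parameter list), and `fim_type` is `identifier_name` because the masked span is an identifier. A minimal sketch of how a consumer might reassemble a row; the `reassemble` function name and the shortened strings are illustrative, not part of the dataset:

```rust
/// Reassemble the original source from one FIM row. The hole sits between
/// `prefix` and `suffix`; `middle` is the ground truth a model must produce.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut src = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    src.push_str(prefix);
    src.push_str(middle);
    src.push_str(suffix);
    src
}

fn main() {
    // Abbreviated cells from the getat_func.rs row above.
    let prefix = "pub(crate) fn ";
    let middle = "invoke";
    let suffix = "(function_args: &Vec<String>) -> Vec<String> { /* ... */ }";
    assert!(reassemble(prefix, middle, suffix).starts_with("pub(crate) fn invoke("));
}
```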
timer.rs
|
// TODO: How can we test this module?
use std::cell::{RefCell, Cell};
use std::fmt;
use std::rc::Rc;
use cpu::irq::{self, IrqClient};
use io::regs::IoReg;
#[derive(Clone, Copy, Debug)]
pub enum Prescaler {
Div1 = 0,
Div64 = 1,
Div256 = 2,
Div1024 = 3,
}
impl Prescaler {
fn new(val: u16) -> Prescaler {
match val {
0 => Prescaler::Div1,
1 => Prescaler::Div64,
2 => Prescaler::Div256,
3 => Prescaler::Div1024,
_ => unreachable!()
}
}
}
bf!(CntReg[u16] {
prescaler: 0:1,
count_up: 2:2,
irq_enable: 6:6,
started: 7:7
});
fn get_regs(dev: &mut TimerDevice, index: usize) -> (&mut IoReg<u16>, &mut IoReg<u16>) {
match index {
0 => (&mut dev.val0, &mut dev.cnt0),
1 => (&mut dev.val1, &mut dev.cnt1),
2 => (&mut dev.val2, &mut dev.cnt2),
3 => (&mut dev.val3, &mut dev.cnt3),
_ => unreachable!()
}
}
fn reg_val_update(dev: &mut TimerDevice, index: usize) {
let val = {
let (val, _) = get_regs(dev, index);
val.get()
};
let mut state = dev._internal_state.all.borrow_mut();
state[index].set_val(val);
drop(state);
update_deadlines(&dev._internal_state);
}
fn reg_val_read(dev: &mut TimerDevice, index: usize) {
let new_val = {
let state = dev._internal_state.all.borrow_mut();
state[index].val() as u16
};
let (val, _) = get_regs(dev, index);
val.set_unchecked(new_val);
}
fn reg_cnt_update(dev: &mut TimerDevice, index: usize) {
let cnt = {
let (_, cnt) = get_regs(dev, index);
CntReg::new(cnt.get())
};
let mut state = dev._internal_state.all.borrow_mut();
if !state[index].started && cnt.started.get() == 1 {
// Set baseline deadline counter for this timer so that we can
// subtract against it later
let baseline = dev._internal_state.global_counter.get();
dev._internal_state.start_counters[index].set(baseline);
}
state[index].started = cnt.started.get() == 1;
state[index].prescaler = Prescaler::new(cnt.prescaler.get());
state[index].val_cycles = if cnt.count_up.get() == 1 {
Cycles::CountUp(state[index].val())
} else {
Cycles::Unscaled(state[index].val())
};
trace!("Setting TIMER CNT{}: {:?}", index, state);
drop(state);
update_deadlines(&dev._internal_state);
}
/// Convert CPU cycles to timer cycles using the given prescaler
fn scale(cycles: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => cycles,
Prescaler::Div64 => cycles >> 6,
Prescaler::Div256 => cycles >> 8,
Prescaler::Div1024 => cycles >> 10
}
}
/// Convert timer cycles to CPU cycles using the given prescaler
fn unscale(clock_ticks: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => clock_ticks,
Prescaler::Div64 => clock_ticks << 6,
Prescaler::Div256 => clock_ticks << 8,
Prescaler::Div1024 => clock_ticks << 10
}
}
fn irq(t_index: usize) -> irq::IrqType9 {
match t_index {
0 => irq::IrqType9::Timer0,
1 => irq::IrqType9::Timer1,
2 => irq::IrqType9::Timer2,
3 => irq::IrqType9::Timer3,
_ => unreachable!()
}
}
#[derive(Copy, Clone, Debug)]
enum Cycles {
CountUp(u64),
Unscaled(u64)
}
#[derive(Debug)]
pub struct TimerState {
started: bool,
val_cycles: Cycles,
prescaler: Prescaler,
}
impl TimerState {
pub fn new() -> TimerState {
TimerState {
started: false,
val_cycles: Cycles::Unscaled(0),
prescaler: Prescaler::Div1
}
}
/// Current timer value in timer cycles
fn val(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => scale(cyc, self.prescaler),
Cycles::CountUp(cyc) => cyc
}
}
/// Update current timer value with cycles elapsed, return true if overflow
fn incr_and_check_overflow(&mut self, clock_diff: Cycles) -> bool {
let till_overflow = self.clocks_till_overflow();
match self.val_cycles {
Cycles::Unscaled(ref mut cyc) => {
if let Cycles::Unscaled(addend) = clock_diff {
*cyc += addend;
addend >= till_overflow
} else { unreachable!() }
}
Cycles::CountUp(ref mut cyc) => {
if let Cycles::CountUp(addend) = clock_diff {
let out = addend + *cyc % (1 << 16) >= 1 << 16;
*cyc += addend;
|
}
}
/// Returns the number of CPU clocks until this timer triggers an overflow
fn clocks_till_overflow(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => {
let period = unscale(1 << 16, self.prescaler);
(period - cyc % period)
}
Cycles::CountUp(_) => {
// It makes no sense to do this calculation for countup timers
// since if they overflow it's because some lower timer overflowed
!0
}
}
}
/// Updates u16 value written to TIMER registers if we match a timer at `t_index`
fn set_val(&mut self, val: u16) {
match self.val_cycles {
Cycles::Unscaled(ref mut cyc)
=> *cyc = unscale(val as u64, self.prescaler),
Cycles::CountUp(ref mut cyc)
=> *cyc = val as u64,
}
}
}
#[derive(Clone)]
pub struct TimerStates {
deadline: Rc<Cell<Option<u64>>>,
pub(crate) global_counter: Rc<Cell<u64>>,
/// global_counter value for when each timer was started
start_counters: Rc<[Cell<u64>; 4]>,
all: Rc<RefCell<[TimerState; 4]>>
}
impl TimerStates {
pub fn new() -> TimerStates {
TimerStates {
deadline: Rc::new(Cell::new(None)),
global_counter: Rc::new(Cell::new(0)),
start_counters: Rc::new([
Cell::new(0), Cell::new(0),
Cell::new(0), Cell::new(0),
]),
all: Rc::new(RefCell::new([
TimerState::new(), TimerState::new(),
TimerState::new(), TimerState::new(),
]))
}
}
}
impl fmt::Debug for TimerStates {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TimerStates {{ }}")
}
}
pub fn handle_clock_update(timer_states: &TimerStates, clock_diff: u64, irq_tx: &mut irq::IrqSyncClient) {
// Update global counter
let ctr = timer_states.global_counter.get();
let new_ctr = ctr + clock_diff;
timer_states.global_counter.set(new_ctr);
// Check if we have any work to do
if !past_deadline(timer_states) {
return
}
let mut timers = timer_states.all.borrow_mut();
if let Cycles::CountUp(_) = timers[0].val_cycles {
panic!("Don't know how to handle TIMER0 as a count-up timer!");
}
// Update individual timers
let mut prev_overflowed = false;
for (index, timer) in timers.iter_mut().enumerate() {
if !timer.started {
continue;
}
let cycles = if let Cycles::CountUp(_) = timer.val_cycles {
Cycles::CountUp(prev_overflowed as u64)
} else {
let ctr = timer_states.global_counter.get();
let baseline = timer_states.start_counters[index].get();
let clock_diff = ctr - baseline;
timer_states.start_counters[index].set(ctr);
Cycles::Unscaled(clock_diff)
};
prev_overflowed = timer.incr_and_check_overflow(cycles);
if prev_overflowed {
irq_tx.assert(irq(index))
}
}
drop(timers);
update_deadlines(timer_states);
}
fn update_deadlines(timer_states: &TimerStates) {
let timers = timer_states.all.borrow();
let min_deadline = timers.iter()
.filter(|timer| timer.started)
.map(|timer| timer.clocks_till_overflow())
.min();
let ctr = timer_states.global_counter.get();
timer_states.deadline.set(min_deadline.map(|m| m + ctr));
}
fn past_deadline(timer_states: &TimerStates) -> bool {
let ctr = timer_states.global_counter.get();
if let Some(deadline) = timer_states.deadline.get() {
ctr > deadline
} else {
false
}
}
iodevice!(TimerDevice, {
internal_state: TimerStates;
regs: {
0x000 => val0: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 0);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 0);
}
0x002 => cnt0: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 0); }
0x004 => val1: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 1);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 1);
}
0x006 => cnt1: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 1); }
0x008 => val2: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 2);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 2);
}
0x00A => cnt2: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 2); }
0x00C => val3: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 3);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 3);
}
0x00E => cnt3: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 3); }
}
});
|
out
} else { unreachable!() }
}
|
random_line_split
|
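The `scale`/`unscale` pair in the row above is plain prescaler arithmetic: `Div64` means the timer ticks once per 64 CPU cycles, so converting CPU cycles to timer ticks is a right shift by 6 and the reverse is a left shift by 6. A standalone sketch of that arithmetic (the enum and functions mirror the row; the assertions are illustrative):

```rust
#[derive(Clone, Copy)]
enum Prescaler { Div1, Div64, Div256, Div1024 }

/// CPU cycles -> timer ticks (integer division by the prescaler factor).
fn scale(cycles: u64, prescaler: Prescaler) -> u64 {
    match prescaler {
        Prescaler::Div1 => cycles,
        Prescaler::Div64 => cycles >> 6,
        Prescaler::Div256 => cycles >> 8,
        Prescaler::Div1024 => cycles >> 10,
    }
}

/// Timer ticks -> CPU cycles.
fn unscale(ticks: u64, prescaler: Prescaler) -> u64 {
    match prescaler {
        Prescaler::Div1 => ticks,
        Prescaler::Div64 => ticks << 6,
        Prescaler::Div256 => ticks << 8,
        Prescaler::Div1024 => ticks << 10,
    }
}

fn main() {
    // A 16-bit timer overflows after 1 << 16 ticks; with Div64 that is
    // 65536 * 64 CPU cycles, which is what clocks_till_overflow computes
    // for a timer currently reading zero.
    assert_eq!(unscale(1 << 16, Prescaler::Div64), 65536 * 64);
    // scale is lossy: the low 6 bits (partial ticks) are discarded.
    assert_eq!(scale(130, Prescaler::Div64), 2);
}
```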
timer.rs
|
// TODO: How can we test this module?
use std::cell::{RefCell, Cell};
use std::fmt;
use std::rc::Rc;
use cpu::irq::{self, IrqClient};
use io::regs::IoReg;
#[derive(Clone, Copy, Debug)]
pub enum Prescaler {
Div1 = 0,
Div64 = 1,
Div256 = 2,
Div1024 = 3,
}
impl Prescaler {
fn new(val: u16) -> Prescaler {
match val {
0 => Prescaler::Div1,
1 => Prescaler::Div64,
2 => Prescaler::Div256,
3 => Prescaler::Div1024,
_ => unreachable!()
}
}
}
bf!(CntReg[u16] {
prescaler: 0:1,
count_up: 2:2,
irq_enable: 6:6,
started: 7:7
});
fn get_regs(dev: &mut TimerDevice, index: usize) -> (&mut IoReg<u16>, &mut IoReg<u16>) {
match index {
0 => (&mut dev.val0, &mut dev.cnt0),
1 => (&mut dev.val1, &mut dev.cnt1),
2 => (&mut dev.val2, &mut dev.cnt2),
3 => (&mut dev.val3, &mut dev.cnt3),
_ => unreachable!()
}
}
fn reg_val_update(dev: &mut TimerDevice, index: usize) {
let val = {
let (val, _) = get_regs(dev, index);
val.get()
};
let mut state = dev._internal_state.all.borrow_mut();
state[index].set_val(val);
drop(state);
update_deadlines(&dev._internal_state);
}
fn reg_val_read(dev: &mut TimerDevice, index: usize) {
let new_val = {
let state = dev._internal_state.all.borrow_mut();
state[index].val() as u16
};
let (val, _) = get_regs(dev, index);
val.set_unchecked(new_val);
}
fn reg_cnt_update(dev: &mut TimerDevice, index: usize) {
let cnt = {
let (_, cnt) = get_regs(dev, index);
CntReg::new(cnt.get())
};
let mut state = dev._internal_state.all.borrow_mut();
if !state[index].started && cnt.started.get() == 1 {
// Set baseline deadline counter for this timer so that we can
// subtract against it later
let baseline = dev._internal_state.global_counter.get();
dev._internal_state.start_counters[index].set(baseline);
}
state[index].started = cnt.started.get() == 1;
state[index].prescaler = Prescaler::new(cnt.prescaler.get());
state[index].val_cycles = if cnt.count_up.get() == 1 {
Cycles::CountUp(state[index].val())
} else {
Cycles::Unscaled(state[index].val())
};
trace!("Setting TIMER CNT{}: {:?}", index, state);
drop(state);
update_deadlines(&dev._internal_state);
}
/// Convert CPU cycles to timer cycles using the given prescaler
fn scale(cycles: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => cycles,
Prescaler::Div64 => cycles >> 6,
Prescaler::Div256 => cycles >> 8,
Prescaler::Div1024 => cycles >> 10
}
}
/// Convert timer cycles to CPU cycles using the given prescaler
fn unscale(clock_ticks: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => clock_ticks,
Prescaler::Div64 => clock_ticks << 6,
Prescaler::Div256 => clock_ticks << 8,
Prescaler::Div1024 => clock_ticks << 10
}
}
fn irq(t_index: usize) -> irq::IrqType9 {
match t_index {
0 => irq::IrqType9::Timer0,
1 => irq::IrqType9::Timer1,
2 => irq::IrqType9::Timer2,
3 => irq::IrqType9::Timer3,
_ => unreachable!()
}
}
#[derive(Copy, Clone, Debug)]
enum Cycles {
CountUp(u64),
Unscaled(u64)
}
#[derive(Debug)]
pub struct TimerState {
started: bool,
val_cycles: Cycles,
prescaler: Prescaler,
}
impl TimerState {
pub fn new() -> TimerState {
TimerState {
started: false,
val_cycles: Cycles::Unscaled(0),
prescaler: Prescaler::Div1
}
}
/// Current timer value in timer cycles
fn val(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => scale(cyc, self.prescaler),
Cycles::CountUp(cyc) => cyc
}
}
/// Update current timer value with cycles elapsed, return true if overflow
fn incr_and_check_overflow(&mut self, clock_diff: Cycles) -> bool {
let till_overflow = self.clocks_till_overflow();
match self.val_cycles {
Cycles::Unscaled(ref mut cyc) => {
if let Cycles::Unscaled(addend) = clock_diff {
*cyc += addend;
addend >= till_overflow
} else { unreachable!() }
}
Cycles::CountUp(ref mut cyc) => {
if let Cycles::CountUp(addend) = clock_diff {
let out = addend + *cyc % (1 << 16) >= 1 << 16;
*cyc += addend;
out
} else { unreachable!() }
}
}
}
/// Returns the number of CPU clocks until this timer triggers an overflow
fn clocks_till_overflow(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => {
let period = unscale(1 << 16, self.prescaler);
(period - cyc % period)
}
Cycles::CountUp(_) => {
// It makes no sense to do this calculation for countup timers
// since if they overflow it's because some lower timer overflowed
!0
}
}
}
/// Updates u16 value written to TIMER registers if we match a timer at `t_index`
fn set_val(&mut self, val: u16) {
match self.val_cycles {
Cycles::Unscaled(ref mut cyc)
=> *cyc = unscale(val as u64, self.prescaler),
Cycles::CountUp(ref mut cyc)
=> *cyc = val as u64,
}
}
}
#[derive(Clone)]
pub struct TimerStates {
deadline: Rc<Cell<Option<u64>>>,
pub(crate) global_counter: Rc<Cell<u64>>,
/// global_counter value for when each timer was started
start_counters: Rc<[Cell<u64>; 4]>,
all: Rc<RefCell<[TimerState; 4]>>
}
impl TimerStates {
pub fn new() -> TimerStates {
TimerStates {
deadline: Rc::new(Cell::new(None)),
global_counter: Rc::new(Cell::new(0)),
start_counters: Rc::new([
Cell::new(0), Cell::new(0),
Cell::new(0), Cell::new(0),
]),
all: Rc::new(RefCell::new([
TimerState::new(), TimerState::new(),
TimerState::new(), TimerState::new(),
]))
}
}
}
impl fmt::Debug for TimerStates {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TimerStates {{ }}")
}
}
pub fn handle_clock_update(timer_states: &TimerStates, clock_diff: u64, irq_tx: &mut irq::IrqSyncClient) {
// Update global counter
let ctr = timer_states.global_counter.get();
let new_ctr = ctr + clock_diff;
timer_states.global_counter.set(new_ctr);
// Check if we have any work to do
if !past_deadline(timer_states) {
return
}
let mut timers = timer_states.all.borrow_mut();
if let Cycles::CountUp(_) = timers[0].val_cycles {
panic!("Don't know how to handle TIMER0 as a count-up timer!");
}
// Update individual timers
let mut prev_overflowed = false;
for (index, timer) in timers.iter_mut().enumerate() {
if !timer.started {
continue;
}
let cycles = if let Cycles::CountUp(_) = timer.val_cycles
|
else {
let ctr = timer_states.global_counter.get();
let baseline = timer_states.start_counters[index].get();
let clock_diff = ctr - baseline;
timer_states.start_counters[index].set(ctr);
Cycles::Unscaled(clock_diff)
};
prev_overflowed = timer.incr_and_check_overflow(cycles);
if prev_overflowed {
irq_tx.assert(irq(index))
}
}
drop(timers);
update_deadlines(timer_states);
}
fn update_deadlines(timer_states: &TimerStates) {
let timers = timer_states.all.borrow();
let min_deadline = timers.iter()
.filter(|timer| timer.started)
.map(|timer| timer.clocks_till_overflow())
.min();
let ctr = timer_states.global_counter.get();
timer_states.deadline.set(min_deadline.map(|m| m + ctr));
}
fn past_deadline(timer_states: &TimerStates) -> bool {
let ctr = timer_states.global_counter.get();
if let Some(deadline) = timer_states.deadline.get() {
ctr > deadline
} else {
false
}
}
iodevice!(TimerDevice, {
internal_state: TimerStates;
regs: {
0x000 => val0: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 0);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 0);
}
0x002 => cnt0: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 0); }
0x004 => val1: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 1);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 1);
}
0x006 => cnt1: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 1); }
0x008 => val2: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 2);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 2);
}
0x00A => cnt2: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 2); }
0x00C => val3: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 3);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 3);
}
0x00E => cnt3: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 3); }
}
});
|
{
Cycles::CountUp(prev_overflowed as u64)
}
|
conditional_block
|
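The `conditional_block` middle in the row above (`Cycles::CountUp(prev_overflowed as u64)`) shows that a count-up timer advances only when the previous timer overflowed. Its overflow test in `incr_and_check_overflow` is `addend + *cyc % (1 << 16) >= 1 << 16`; since `%` binds tighter than `+`, this asks whether the new increment pushes the low 16 bits of the counter past 65535. A small standalone check of that predicate, with illustrative values:

```rust
/// The count-up overflow predicate from the row, extracted for clarity:
/// does adding `addend` ticks wrap the low 16 bits of `cyc`?
fn count_up_overflows(cyc: u64, addend: u64) -> bool {
    // `%` binds tighter than `+`: addend + (cyc % 65536) >= 65536
    addend + cyc % (1 << 16) >= 1 << 16
}

fn main() {
    assert!(!count_up_overflows(0, 1));            // far from the wrap point
    assert!(count_up_overflows(65535, 1));         // exactly wraps
    assert!(count_up_overflows(65536 + 65535, 1)); // only the low 16 bits matter
}
```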
timer.rs
|
// TODO: How can we test this module?
use std::cell::{RefCell, Cell};
use std::fmt;
use std::rc::Rc;
use cpu::irq::{self, IrqClient};
use io::regs::IoReg;
#[derive(Clone, Copy, Debug)]
pub enum Prescaler {
Div1 = 0,
Div64 = 1,
Div256 = 2,
Div1024 = 3,
}
impl Prescaler {
fn
|
(val: u16) -> Prescaler {
match val {
0 => Prescaler::Div1,
1 => Prescaler::Div64,
2 => Prescaler::Div256,
3 => Prescaler::Div1024,
_ => unreachable!()
}
}
}
bf!(CntReg[u16] {
prescaler: 0:1,
count_up: 2:2,
irq_enable: 6:6,
started: 7:7
});
fn get_regs(dev: &mut TimerDevice, index: usize) -> (&mut IoReg<u16>, &mut IoReg<u16>) {
match index {
0 => (&mut dev.val0, &mut dev.cnt0),
1 => (&mut dev.val1, &mut dev.cnt1),
2 => (&mut dev.val2, &mut dev.cnt2),
3 => (&mut dev.val3, &mut dev.cnt3),
_ => unreachable!()
}
}
fn reg_val_update(dev: &mut TimerDevice, index: usize) {
let val = {
let (val, _) = get_regs(dev, index);
val.get()
};
let mut state = dev._internal_state.all.borrow_mut();
state[index].set_val(val);
drop(state);
update_deadlines(&dev._internal_state);
}
fn reg_val_read(dev: &mut TimerDevice, index: usize) {
let new_val = {
let state = dev._internal_state.all.borrow_mut();
state[index].val() as u16
};
let (val, _) = get_regs(dev, index);
val.set_unchecked(new_val);
}
fn reg_cnt_update(dev: &mut TimerDevice, index: usize) {
let cnt = {
let (_, cnt) = get_regs(dev, index);
CntReg::new(cnt.get())
};
let mut state = dev._internal_state.all.borrow_mut();
if !state[index].started && cnt.started.get() == 1 {
// Set baseline deadline counter for this timer so that we can
// subtract against it later
let baseline = dev._internal_state.global_counter.get();
dev._internal_state.start_counters[index].set(baseline);
}
state[index].started = cnt.started.get() == 1;
state[index].prescaler = Prescaler::new(cnt.prescaler.get());
state[index].val_cycles = if cnt.count_up.get() == 1 {
Cycles::CountUp(state[index].val())
} else {
Cycles::Unscaled(state[index].val())
};
trace!("Setting TIMER CNT{}: {:?}", index, state);
drop(state);
update_deadlines(&dev._internal_state);
}
/// Convert CPU cycles to timer cycles using the given prescaler
fn scale(cycles: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => cycles,
Prescaler::Div64 => cycles >> 6,
Prescaler::Div256 => cycles >> 8,
Prescaler::Div1024 => cycles >> 10
}
}
/// Convert timer cycles to CPU cycles using the given prescaler
fn unscale(clock_ticks: u64, prescaler: Prescaler) -> u64 {
match prescaler {
Prescaler::Div1 => clock_ticks,
Prescaler::Div64 => clock_ticks << 6,
Prescaler::Div256 => clock_ticks << 8,
Prescaler::Div1024 => clock_ticks << 10
}
}
fn irq(t_index: usize) -> irq::IrqType9 {
match t_index {
0 => irq::IrqType9::Timer0,
1 => irq::IrqType9::Timer1,
2 => irq::IrqType9::Timer2,
3 => irq::IrqType9::Timer3,
_ => unreachable!()
}
}
#[derive(Copy, Clone, Debug)]
enum Cycles {
CountUp(u64),
Unscaled(u64)
}
#[derive(Debug)]
pub struct TimerState {
started: bool,
val_cycles: Cycles,
prescaler: Prescaler,
}
impl TimerState {
pub fn new() -> TimerState {
TimerState {
started: false,
val_cycles: Cycles::Unscaled(0),
prescaler: Prescaler::Div1
}
}
/// Current timer value in timer cycles
fn val(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => scale(cyc, self.prescaler),
Cycles::CountUp(cyc) => cyc
}
}
/// Update current timer value with cycles elapsed, return true if overflow
fn incr_and_check_overflow(&mut self, clock_diff: Cycles) -> bool {
let till_overflow = self.clocks_till_overflow();
match self.val_cycles {
Cycles::Unscaled(ref mut cyc) => {
if let Cycles::Unscaled(addend) = clock_diff {
*cyc += addend;
addend >= till_overflow
} else { unreachable!() }
}
Cycles::CountUp(ref mut cyc) => {
if let Cycles::CountUp(addend) = clock_diff {
let out = addend + *cyc % (1 << 16) >= 1 << 16;
*cyc += addend;
out
} else { unreachable!() }
}
}
}
/// Returns the number of CPU clocks until this timer triggers an overflow
fn clocks_till_overflow(&self) -> u64 {
match self.val_cycles {
Cycles::Unscaled(cyc) => {
let period = unscale(1 << 16, self.prescaler);
(period - cyc % period)
}
Cycles::CountUp(_) => {
// It makes no sense to do this calculation for countup timers
// since if they overflow it's because some lower timer overflowed
!0
}
}
}
/// Updates u16 value written to TIMER registers if we match a timer at `t_index`
fn set_val(&mut self, val: u16) {
match self.val_cycles {
Cycles::Unscaled(ref mut cyc)
=> *cyc = unscale(val as u64, self.prescaler),
Cycles::CountUp(ref mut cyc)
=> *cyc = val as u64,
}
}
}
#[derive(Clone)]
pub struct TimerStates {
deadline: Rc<Cell<Option<u64>>>,
pub(crate) global_counter: Rc<Cell<u64>>,
/// global_counter value for when each timer was started
start_counters: Rc<[Cell<u64>; 4]>,
all: Rc<RefCell<[TimerState; 4]>>
}
impl TimerStates {
pub fn new() -> TimerStates {
TimerStates {
deadline: Rc::new(Cell::new(None)),
global_counter: Rc::new(Cell::new(0)),
start_counters: Rc::new([
Cell::new(0), Cell::new(0),
Cell::new(0), Cell::new(0),
]),
all: Rc::new(RefCell::new([
TimerState::new(), TimerState::new(),
TimerState::new(), TimerState::new(),
]))
}
}
}
impl fmt::Debug for TimerStates {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TimerStates {{ }}")
}
}
pub fn handle_clock_update(timer_states: &TimerStates, clock_diff: u64, irq_tx: &mut irq::IrqSyncClient) {
// Update global counter
let ctr = timer_states.global_counter.get();
let new_ctr = ctr + clock_diff;
timer_states.global_counter.set(new_ctr);
// Check if we have any work to do
if !past_deadline(timer_states) {
return
}
let mut timers = timer_states.all.borrow_mut();
if let Cycles::CountUp(_) = timers[0].val_cycles {
panic!("Don't know how to handle TIMER0 as a count-up timer!");
}
// Update individual timers
let mut prev_overflowed = false;
for (index, timer) in timers.iter_mut().enumerate() {
if !timer.started {
continue;
}
let cycles = if let Cycles::CountUp(_) = timer.val_cycles {
Cycles::CountUp(prev_overflowed as u64)
} else {
let ctr = timer_states.global_counter.get();
let baseline = timer_states.start_counters[index].get();
let clock_diff = ctr - baseline;
timer_states.start_counters[index].set(ctr);
Cycles::Unscaled(clock_diff)
};
prev_overflowed = timer.incr_and_check_overflow(cycles);
if prev_overflowed {
irq_tx.assert(irq(index))
}
}
drop(timers);
update_deadlines(timer_states);
}
fn update_deadlines(timer_states: &TimerStates) {
let timers = timer_states.all.borrow();
let min_deadline = timers.iter()
.filter(|timer| timer.started)
.map(|timer| timer.clocks_till_overflow())
.min();
let ctr = timer_states.global_counter.get();
timer_states.deadline.set(min_deadline.map(|m| m + ctr));
}
fn past_deadline(timer_states: &TimerStates) -> bool {
let ctr = timer_states.global_counter.get();
if let Some(deadline) = timer_states.deadline.get() {
ctr > deadline
} else {
false
}
}
iodevice!(TimerDevice, {
internal_state: TimerStates;
regs: {
0x000 => val0: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 0);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 0);
}
0x002 => cnt0: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 0); }
0x004 => val1: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 1);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 1);
}
0x006 => cnt1: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 1); }
0x008 => val2: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 2);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 2);
}
0x00A => cnt2: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 2); }
0x00C => val3: u16 {
read_effect = |dev: &mut TimerDevice| reg_val_read(dev, 3);
write_effect = |dev: &mut TimerDevice| reg_val_update(dev, 3);
}
0x00E => cnt3: u16 { write_effect = |dev: &mut TimerDevice| reg_cnt_update(dev, 3); }
}
});
|
new
|
identifier_name
|
issue-44197.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![feature(generators, generator_trait)]
use std::ops::{ Generator, GeneratorState };
fn foo(_: &str) -> String {
String::new()
}
fn bar(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
yield foo(&baz);
}
}
fn
|
(_: &str) -> Result<String, ()> {
Err(())
}
fn bar2(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
if let Ok(quux) = foo2(&baz) {
yield quux;
}
}
}
fn main() {
unsafe {
assert_eq!(bar(String::new()).resume(), GeneratorState::Yielded(String::new()));
assert_eq!(bar2(String::new()).resume(), GeneratorState::Complete(()));
}
}
|
foo2
|
identifier_name
|
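The issue-44197.rs test above pins down generator control flow: `bar` yields `foo(&baz)` exactly once and then returns, while `bar2` never reaches its `yield` because `foo2` always returns `Err(())`, so resuming it completes immediately. A rough stable-Rust analogue of the same control flow using `std::iter::from_fn` (an illustration of the semantics only, not the unstable generator API under test):

```rust
fn foo(_: &str) -> String { String::new() }
fn foo2(_: &str) -> Result<String, ()> { Err(()) }

fn main() {
    // bar: yields foo(&baz) exactly once, then finishes.
    let baz = String::new();
    let mut yielded = false;
    let mut bar = std::iter::from_fn(move || {
        if yielded { return None; }
        yielded = true;
        Some(foo(&baz))
    });
    assert_eq!(bar.next(), Some(String::new())); // ~ GeneratorState::Yielded(...)
    assert_eq!(bar.next(), None);                // ~ GeneratorState::Complete(())

    // bar2: the `if let Ok(quux)` guard fails, so nothing is ever yielded.
    let baz2 = String::new();
    let mut bar2 = std::iter::from_fn(move || foo2(&baz2).ok());
    assert_eq!(bar2.next(), None);               // completes on the first resume
}
```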
issue-44197.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![feature(generators, generator_trait)]
use std::ops::{ Generator, GeneratorState };
fn foo(_: &str) -> String {
String::new()
}
fn bar(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
yield foo(&baz);
}
}
fn foo2(_: &str) -> Result<String, ()> {
Err(())
}
fn bar2(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
if let Ok(quux) = foo2(&baz)
|
}
}
fn main() {
unsafe {
assert_eq!(bar(String::new()).resume(), GeneratorState::Yielded(String::new()));
assert_eq!(bar2(String::new()).resume(), GeneratorState::Complete(()));
}
}
|
{
yield quux;
}
|
conditional_block
|
issue-44197.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![feature(generators, generator_trait)]
use std::ops::{ Generator, GeneratorState };
fn foo(_: &str) -> String {
String::new()
}
fn bar(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
yield foo(&baz);
}
}
|
}
fn bar2(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
if let Ok(quux) = foo2(&baz) {
yield quux;
}
}
}
fn main() {
unsafe {
assert_eq!(bar(String::new()).resume(), GeneratorState::Yielded(String::new()));
assert_eq!(bar2(String::new()).resume(), GeneratorState::Complete(()));
}
}
|
fn foo2(_: &str) -> Result<String, ()> {
Err(())
|
random_line_split
|
issue-44197.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![feature(generators, generator_trait)]
use std::ops::{ Generator, GeneratorState };
fn foo(_: &str) -> String {
String::new()
}
fn bar(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
yield foo(&baz);
}
}
fn foo2(_: &str) -> Result<String, ()> {
Err(())
}
fn bar2(baz: String) -> impl Generator<Yield = String, Return = ()> {
move || {
if let Ok(quux) = foo2(&baz) {
yield quux;
}
}
}
fn main()
|
{
unsafe {
assert_eq!(bar(String::new()).resume(), GeneratorState::Yielded(String::new()));
assert_eq!(bar2(String::new()).resume(), GeneratorState::Complete(()));
}
}
|
identifier_body
|
|
exception.rs
|
use crate::wrapper::{NIF_ENV, NIF_TERM};
/// Raise an "error exception".
///
/// # Unsafe
///
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course the usual rules about `env` and `term` still apply.
pub unsafe fn raise_exception(env: NIF_ENV, term: NIF_TERM) -> NIF_TERM {
rustler_sys::enif_raise_exception(env, term)
}
/// Raise a `badarg` exception.
///
/// # Unsafe
///
|
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course `env` must be a valid environment.
pub unsafe fn raise_badarg(env: NIF_ENV) -> NIF_TERM {
rustler_sys::enif_make_badarg(env)
}
|
random_line_split
|
|
exception.rs
|
use crate::wrapper::{NIF_ENV, NIF_TERM};
/// Raise an "error exception".
///
/// # Unsafe
///
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course the usual rules about `env` and `term` still apply.
pub unsafe fn raise_exception(env: NIF_ENV, term: NIF_TERM) -> NIF_TERM {
rustler_sys::enif_raise_exception(env, term)
}
/// Raise a `badarg` exception.
///
/// # Unsafe
///
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course `env` must be a valid environment.
pub unsafe fn
|
(env: NIF_ENV) -> NIF_TERM {
rustler_sys::enif_make_badarg(env)
}
|
raise_badarg
|
identifier_name
|
exception.rs
|
use crate::wrapper::{NIF_ENV, NIF_TERM};
/// Raise an "error exception".
///
/// # Unsafe
///
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course the usual rules about `env` and `term` still apply.
pub unsafe fn raise_exception(env: NIF_ENV, term: NIF_TERM) -> NIF_TERM {
rustler_sys::enif_raise_exception(env, term)
}
/// Raise a `badarg` exception.
///
/// # Unsafe
///
/// The value returned by this function "can be used only as the return value
/// from the NIF that invoked it (directly or indirectly) or be passed to
/// `enif_is_exception`, but not to any other NIF API function."
///
/// And of course `env` must be a valid environment.
pub unsafe fn raise_badarg(env: NIF_ENV) -> NIF_TERM
|
{
rustler_sys::enif_make_badarg(env)
}
|
identifier_body
|
|
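Both wrappers in the exception.rs rows above only forward to `rustler_sys`, and their return values are meaningful solely as the invoking NIF's return value. A minimal sketch of the intended call pattern, assuming it sits alongside the module above (the `check_arity` helper and its parameters are hypothetical, not part of the module):

```rust
use crate::wrapper::{NIF_ENV, NIF_TERM};

/// Hypothetical raw NIF body: validate an argument count and bail out with
/// `badarg` when it is wrong. `env` must be the environment handed to the NIF.
unsafe fn check_arity(env: NIF_ENV, argc: i32, expected: i32, ok_term: NIF_TERM) -> NIF_TERM {
    if argc != expected {
        // The returned term may only be used as this NIF's return value.
        return raise_badarg(env);
    }
    ok_term
}
```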
action.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
use std::time::Instant;
use anyhow::Result;
use log::error;
use log::info;
use crate::error::*;
pub struct CloudSyncTrigger;
impl CloudSyncTrigger {
pub fn fire<P: AsRef<Path>>(
sid: &String,
path: P,
retries: u32,
version: Option<u64>,
workspace: String,
reason: String,
) -> Result<()>
|
.spawn()?; // do not retry if failed to start
info!(
"{} Fire `hg cloud sync` attempt {}, spawned process id '{}'",
sid,
i,
child.id()
);
let output = child.wait_with_output()?;
info!(
"{} stdout: \n{}",
sid,
String::from_utf8_lossy(&output.stdout).trim()
);
info!(
"{} stderr: \n{}",
sid,
String::from_utf8_lossy(&output.stderr).trim()
);
let end = now.elapsed();
info!(
"{} Cloud Sync time: {} sec {} ms",
sid,
end.as_secs(),
end.subsec_nanos() as u64 / 1_000_000
);
if !output.status.success() {
error!("{} Process exited with: {}", sid, output.status);
if i == retries - 1 {
return Err(ErrorKind::CommitCloudHgCloudSyncError(format!(
"process exited with: {}, retry later",
output.status
))
.into());
}
} else {
info!("{} Cloud Sync was successful", sid);
return Ok(());
}
}
Ok(())
}
}
|
{
let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace];
if let Some(version) = version {
workspace_args.append(&mut vec![
"--workspace-version".to_owned(),
version.to_string(),
]);
}
for i in 0..retries {
let now = Instant::now();
let child = Command::new("hg")
.current_dir(&path)
.env("HGPLAIN", "hint")
.args(vec!["cloud", "sync"])
.arg("--check-autosync-enabled")
.arg("--use-bgssh")
.args(&workspace_args)
.args(&vec!["--reason", &reason])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
|
identifier_body
|
action.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
use std::time::Instant;
use anyhow::Result;
use log::error;
use log::info;
use crate::error::*;
pub struct CloudSyncTrigger;
impl CloudSyncTrigger {
pub fn fire<P: AsRef<Path>>(
sid: &String,
path: P,
retries: u32,
version: Option<u64>,
workspace: String,
reason: String,
) -> Result<()> {
let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace];
if let Some(version) = version {
workspace_args.append(&mut vec![
"--workspace-version".to_owned(),
version.to_string(),
]);
}
for i in 0..retries {
let now = Instant::now();
let child = Command::new("hg")
.current_dir(&path)
.env("HGPLAIN", "hint")
.args(vec!["cloud", "sync"])
.arg("--check-autosync-enabled")
.arg("--use-bgssh")
.args(&workspace_args)
.args(&vec!["--reason", &reason])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?; // do not retry if failed to start
info!(
"{} Fire `hg cloud sync` attempt {}, spawned process id '{}'",
sid,
i,
child.id()
);
let output = child.wait_with_output()?;
info!(
"{} stdout: \n{}",
sid,
String::from_utf8_lossy(&output.stdout).trim()
);
info!(
"{} stderr: \n{}",
sid,
String::from_utf8_lossy(&output.stderr).trim()
);
let end = now.elapsed();
info!(
"{} Cloud Sync time: {} sec {} ms",
sid,
end.as_secs(),
end.subsec_nanos() as u64 / 1_000_000
);
if !output.status.success() {
error!("{} Process exited with: {}", sid, output.status);
if i == retries - 1
|
} else {
info!("{} Cloud Sync was successful", sid);
return Ok(());
}
}
Ok(())
}
}
|
{
return Err(ErrorKind::CommitCloudHgCloudSyncError(format!(
"process exited with: {}, retry later",
output.status
))
.into());
}
|
conditional_block
|
action.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
use std::time::Instant;
use anyhow::Result;
use log::error;
use log::info;
use crate::error::*;
pub struct CloudSyncTrigger;
impl CloudSyncTrigger {
pub fn fire<P: AsRef<Path>>(
sid: &String,
path: P,
retries: u32,
version: Option<u64>,
workspace: String,
reason: String,
) -> Result<()> {
let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace];
if let Some(version) = version {
workspace_args.append(&mut vec![
"--workspace-version".to_owned(),
version.to_string(),
]);
}
for i in 0..retries {
let now = Instant::now();
let child = Command::new("hg")
.current_dir(&path)
.env("HGPLAIN", "hint")
.args(vec!["cloud", "sync"])
.arg("--check-autosync-enabled")
.arg("--use-bgssh")
.args(&workspace_args)
.args(&vec!["--reason", &reason])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?; // do not retry if failed to start
info!(
|
let output = child.wait_with_output()?;
info!(
"{} stdout: \n{}",
sid,
String::from_utf8_lossy(&output.stdout).trim()
);
info!(
"{} stderr: \n{}",
sid,
String::from_utf8_lossy(&output.stderr).trim()
);
let end = now.elapsed();
info!(
"{} Cloud Sync time: {} sec {} ms",
sid,
end.as_secs(),
end.subsec_nanos() as u64 / 1_000_000
);
if !output.status.success() {
error!("{} Process exited with: {}", sid, output.status);
if i == retries - 1 {
return Err(ErrorKind::CommitCloudHgCloudSyncError(format!(
"process exited with: {}, retry later",
output.status
))
.into());
}
} else {
info!("{} Cloud Sync was successful", sid);
return Ok(());
}
}
Ok(())
}
}
|
"{} Fire `hg cloud sync` attempt {}, spawned process id '{}'",
sid,
i,
child.id()
);
|
random_line_split
|
action.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
use std::time::Instant;
use anyhow::Result;
use log::error;
use log::info;
use crate::error::*;
pub struct CloudSyncTrigger;
impl CloudSyncTrigger {
pub fn
|
<P: AsRef<Path>>(
sid: &String,
path: P,
retries: u32,
version: Option<u64>,
workspace: String,
reason: String,
) -> Result<()> {
let mut workspace_args = vec!["--raw-workspace-name".to_owned(), workspace];
if let Some(version) = version {
workspace_args.append(&mut vec![
"--workspace-version".to_owned(),
version.to_string(),
]);
}
for i in 0..retries {
let now = Instant::now();
let child = Command::new("hg")
.current_dir(&path)
.env("HGPLAIN", "hint")
.args(vec!["cloud", "sync"])
.arg("--check-autosync-enabled")
.arg("--use-bgssh")
.args(&workspace_args)
.args(&vec!["--reason", &reason])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()?; // do not retry if failed to start
info!(
"{} Fire `hg cloud sync` attempt {}, spawned process id '{}'",
sid,
i,
child.id()
);
let output = child.wait_with_output()?;
info!(
"{} stdout: \n{}",
sid,
String::from_utf8_lossy(&output.stdout).trim()
);
info!(
"{} stderr: \n{}",
sid,
String::from_utf8_lossy(&output.stderr).trim()
);
let end = now.elapsed();
info!(
"{} Cloud Sync time: {} sec {} ms",
sid,
end.as_secs(),
end.subsec_nanos() as u64 / 1_000_000
);
if !output.status.success() {
error!("{} Process exited with: {}", sid, output.status);
if i == retries - 1 {
return Err(ErrorKind::CommitCloudHgCloudSyncError(format!(
"process exited with: {}, retry later",
output.status
))
.into());
}
} else {
info!("{} Cloud Sync was successful", sid);
return Ok(());
}
}
Ok(())
}
}
|
fire
|
identifier_name
|
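The `fire` body in the action.rs rows above is a bounded retry loop around a spawned subprocess: a failure to spawn aborts immediately (the `?` on `spawn()`), a non-zero exit retries up to `retries` times, and success returns early. A stripped-down sketch of the same pattern with `std::process::Command` (the command, arguments, and `run_with_retries` name are placeholders):

```rust
use std::process::{Command, Stdio};

/// Run `cmd` up to `retries` times, treating a non-zero exit as retryable
/// and a failure to spawn as fatal (mirroring the `?` on `spawn()` above).
fn run_with_retries(cmd: &str, args: &[&str], retries: u32) -> std::io::Result<bool> {
    for attempt in 0..retries {
        let child = Command::new(cmd)
            .args(args)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?; // do not retry if the process cannot even start
        let output = child.wait_with_output()?;
        if output.status.success() {
            return Ok(true); // early return, like `return Ok(())` above
        }
        eprintln!("attempt {} exited with {}", attempt, output.status);
    }
    Ok(false) // all attempts exhausted
}
```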
hash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General hash types, a fixed-size raw-data type used as the output of hash functions.
use std::{ops, fmt, cmp};
use std::cmp::{min, Ordering};
use std::ops::{Deref, DerefMut, BitXor, BitAnd, BitOr, IndexMut, Index};
use std::hash::{Hash, Hasher, BuildHasherDefault};
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use rand::Rng;
use rand::os::OsRng;
use rustc_serialize::hex::{FromHex, FromHexError};
use bigint::{Uint, U256};
use libc::{c_void, memcmp};
/// Trait for a fixed-size byte array to be used as the output of hash functions.
pub trait FixedHash: Sized {
/// Create a new, zero-initialised, instance.
fn new() -> Self;
/// Synonym for `new()`. Prefer `new()`, as it's more readable.
fn zero() -> Self;
/// Create a new, cryptographically random, instance.
fn random() -> Self;
/// Assign self a cryptographically random value.
fn randomize(&mut self);
/// Get the size of this object in bytes.
fn len() -> usize;
/// Convert a slice of bytes of length `len()` to an instance of this type.
fn from_slice(src: &[u8]) -> Self;
/// Assign self to be of the same value as a slice of bytes of length `len()`.
fn clone_from_slice(&mut self, src: &[u8]) -> usize;
/// Copy the data of this object into some mutable slice of length `len()`.
fn copy_to(&self, dest: &mut [u8]);
/// Returns `true` if all bits set in `b` are also set in `self`.
fn contains<'a>(&'a self, b: &'a Self) -> bool;
/// Returns `true` if no bits are set.
fn is_zero(&self) -> bool;
/// Returns the lowest 8 bytes interpreted as a BigEndian integer.
fn low_u64(&self) -> u64;
}
/// Return `s` without the `0x` at the beginning of it, if any.
pub fn clean_0x(s: &str) -> &str {
if s.starts_with("0x") {
&s[2..]
} else {
s
}
}
macro_rules! impl_hash {
($from: ident, $size: expr) => {
#[repr(C)]
/// Unformatted binary data of fixed length.
pub struct $from (pub [u8; $size]);
impl From<[u8; $size]> for $from {
fn from(bytes: [u8; $size]) -> Self {
$from(bytes)
}
}
impl From<$from> for [u8; $size] {
fn from(s: $from) -> Self {
s.0
}
}
impl Deref for $from {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
&self.0
}
}
impl AsRef<[u8]> for $from {
#[inline]
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl DerefMut for $from {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl FixedHash for $from {
fn new() -> $from {
$from([0; $size])
}
fn zero() -> $from {
$from([0; $size])
}
fn random() -> $from {
let mut hash = $from::new();
hash.randomize();
hash
}
fn randomize(&mut self) {
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut self.0);
}
fn len() -> usize {
$size
}
#[inline]
fn clone_from_slice(&mut self, src: &[u8]) -> usize {
let min = cmp::min($size, src.len());
self.0[..min].copy_from_slice(&src[..min]);
min
}
fn from_slice(src: &[u8]) -> Self {
let mut r = Self::new();
r.clone_from_slice(src);
r
}
fn copy_to(&self, dest: &mut[u8]) {
let min = cmp::min($size, dest.len());
dest[..min].copy_from_slice(&self.0[..min]);
}
fn contains<'a>(&'a self, b: &'a Self) -> bool {
&(b & self) == b
}
fn is_zero(&self) -> bool {
self.eq(&Self::new())
}
fn low_u64(&self) -> u64 {
let mut ret = 0u64;
for i in 0..min($size, 8) {
ret |= (self.0[$size - 1 - i] as u64) << (i * 8);
}
ret
}
}
impl FromStr for $from {
type Err = FromHexError;
fn from_str(s: &str) -> Result<$from, FromHexError> {
let a = s.from_hex()?;
if a.len() != $size {
return Err(FromHexError::InvalidHexLength);
}
let mut ret = [0;$size];
ret.copy_from_slice(&a);
Ok($from(ret))
}
}
impl fmt::Debug for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[..] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl fmt::Display for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[0..2] {
write!(f, "{:02x}", i)?;
}
write!(f, "…")?;
for i in &self.0[$size - 2..$size] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl Copy for $from {}
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
impl Clone for $from {
fn clone(&self) -> $from {
let mut ret = $from::new();
ret.0.copy_from_slice(&self.0);
ret
}
}
impl Eq for $from {}
impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool {
unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) == 0 }
}
}
impl Ord for $from {
fn cmp(&self, other: &Self) -> Ordering {
let r = unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) };
if r < 0 { return Ordering::Less }
if r > 0 { return Ordering::Greater }
return Ordering::Equal;
}
}
impl PartialOrd for $from {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Hash for $from {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);
state.finish();
}
}
impl Index<usize> for $from {
type Output = u8;
fn index(&self, index: usize) -> &u8 {
&self.0[index]
}
}
impl IndexMut<usize> for $from {
fn index_mut(&mut self, index: usize) -> &mut u8 {
&mut self.0[index]
}
}
impl Index<ops::Range<usize>> for $from {
type Output = [u8];
fn index(&self, index: ops::Range<usize>) -> &[u8] {
&self.0[index]
}
}
impl IndexMut<ops::Range<usize>> for $from {
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [u8] {
&mut self.0[index]
}
}
impl Index<ops::RangeFull> for $from {
type Output = [u8];
fn index(&self, _index: ops::RangeFull) -> &[u8] {
&self.0
}
}
impl IndexMut<ops::RangeFull> for $from {
fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [u8] {
&mut self.0
}
}
/// `BitOr` on references
impl<'a> BitOr for &'a $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] | rhs.0[i];
}
ret
}
}
/// Moving `BitOr`
impl BitOr for $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
&self | &rhs
}
}
/// `BitAnd` on references
impl <'a> BitAnd for &'a $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] & rhs.0[i];
}
ret
}
}
/// Moving `BitAnd`
impl BitAnd for $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
&self & &rhs
}
}
/// `BitXor` on references
impl <'a> BitXor for &'a $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] ^ rhs.0[i];
}
ret
}
}
/// Moving `BitXor`
impl BitXor for $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
&self ^ &rhs
}
}
impl $from {
/// Get a hex representation.
pub fn hex(&self) -> String {
format!("{:?}", self)
}
}
impl Default for $from {
fn default() -> Self { $from::new() }
}
impl From<u64> for $from {
fn from(mut value: u64) -> $from {
let mut ret = $from::new();
for i in 0..8 {
if i < $size {
ret.0[$size - i - 1] = (value & 0xff) as u8;
value >>= 8;
}
}
ret
}
}
impl From<&'static str> for $from {
fn from(s: &'static str) -> $from {
let s = clean_0x(s);
if s.len() % 2 == 1 {
$from::from_str(&("0".to_owned() + s)).unwrap()
} else {
$from::from_str(s).unwrap()
}
}
}
impl<'a> From<&'a [u8]> for $from {
fn from(s: &'a [u8]) -> $from {
$from::from_slice(s)
}
}
}
}
impl From<U256> for H256 {
fn from(value: U256) -> H256 {
let mut ret = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl<'a> From<&'a U256> for H256 {
fn from(value: &'a U256) -> H256 {
let mut ret: H256 = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl From<H256> for U256 {
fn from(value: H256) -> U256 {
U256::from(&value)
}
}
impl<'a> From<&'a H256> for U256 {
fn from(value: &'a H256) -> U256 {
U256::from(value.as_ref() as &[u8])
}
}
impl From<H256> for H160 {
fn from(value: H256) -> H160 {
let mut ret = H160::new();
ret.0.copy_from_slice(&value[12..32]);
ret
}
}
impl From<H256> for H64 {
fn from(value: H256) -> H64 {
let mut ret = H64::new();
ret.0.copy_from_slice(&value[20..28]);
ret
}
}
impl From<H160> for H256 {
fn from(value: H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(&value);
ret
}
}
impl<'a> From<&'a H160> for H256 {
fn from(value: &'a H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(value);
ret
}
}
impl_hash!(H32, 4);
impl_hash!(H64, 8);
impl_hash!(H128, 16);
impl_hash!(H160, 20);
impl_hash!(H256, 32);
impl_hash!(H264, 33);
impl_hash!(H512, 64);
impl_hash!(H520, 65);
impl_hash!(H1024, 128);
impl_hash!(H2048, 256);
known_heap_size!(0, H32, H64, H128, H160, H256, H264, H512, H520, H1024, H2048);
// Specialized HashMap and HashSet
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
pub struct PlainHasher {
prefix: [u8; 8],
_marker: [u64; 0], // for alignment
}
impl Default for PlainHasher {
#[inline]
fn default() -> PlainHasher {
PlainHasher {
prefix: [0; 8],
_marker: [0; 0],
}
}
}
impl Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
unsafe { ::std::mem::transmute(self.prefix) }
}
#[inline]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
for quarter in bytes.chunks(8) {
for (x, y) in self.prefix.iter_mut().zip(quarter) {
*x ^= *y
}
}
}
}
/// Specialized version of `HashMap` with H256 keys and fast hashing function.
pub type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;
/// Specialized version of `HashSet` with H256 keys and fast hashing function.
pub type H256FastSet = HashSet<H256, BuildHasherDefault<PlainHasher>>;
#[cfg(test)]
mod tests {
use hash::*;
use bigint::*;
use std::str::FromStr;
#[test]
fn hasher_alignment() {
use std::mem::align_of;
assert_eq!(align_of::<u64>(), align_of::<PlainHasher>());
}
#[test]
#[cfg_attr(feature="dev", allow(eq_op))]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
assert_eq!(format!("{}", h), "0123…cdef");
assert_eq!(format!("{:?}", h), "0123456789abcdef");
assert_eq!(h.hex(), "0123456789abcdef");
assert!(h == h);
assert!(h != H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xee]));
assert!(h != H64([0; 8]));
}
#[test]
fn hash_bitor() {
let a = H64([1; 8]);
let b = H64([2; 8]);
let c = H64([3; 8]);
// borrow
assert_eq!(&a | &b, c);
// move
assert_eq!(a | b, c);
}
#[test]
fn from_and_to_address() {
let address: H160 = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
let h = H256::from(address.clone());
let a = H160::from(h);
assert_eq!(address, a);
}
#[test]
fn from
|
assert_eq!(H128::from(0x1234567890abcdef), H128::from_str("00000000000000001234567890abcdef").unwrap());
assert_eq!(H64::from(0x1234567890abcdef), H64::from_str("1234567890abcdef").unwrap());
assert_eq!(H32::from(0x1234567890abcdef), H32::from_str("90abcdef").unwrap());
}
#[test]
fn from_str() {
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
}
#[test]
fn from_and_to_u256() {
let u: U256 = 0x123456789abcdef0u64.into();
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
assert_eq!(h, h_ref);
let r_ref: U256 = From::from(&h);
assert_eq!(r_ref, u);
let r: U256 = From::from(h);
assert_eq!(r, u);
}
}
|
_u64() {
|
identifier_name
|
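`PlainHasher` in the hash.rs row above does no real hashing: `write` XOR-folds a 32-byte key into an 8-byte prefix, and `finish` transmutes that prefix into a `u64`, which is fine for `H256` keys that are themselves uniformly distributed hash outputs. The folding step in isolation (a standalone sketch; `fold_32_bytes` is an illustrative name):

```rust
/// XOR-fold a 32-byte slice into 8 bytes, as PlainHasher::write does.
fn fold_32_bytes(bytes: &[u8; 32]) -> u64 {
    let mut prefix = [0u8; 8];
    for quarter in bytes.chunks(8) {
        for (x, y) in prefix.iter_mut().zip(quarter) {
            *x ^= *y; // each output byte mixes four input bytes
        }
    }
    u64::from_ne_bytes(prefix) // equivalent to the transmute in `finish`
}

fn main() {
    let key = [0xabu8; 32];
    // XOR of four identical quarters cancels out pairwise.
    assert_eq!(fold_32_bytes(&key), 0);
}
```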
hash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General hash types, a fixed-size raw-data type used as the output of hash functions.
use std::{ops, fmt, cmp};
use std::cmp::{min, Ordering};
use std::ops::{Deref, DerefMut, BitXor, BitAnd, BitOr, IndexMut, Index};
use std::hash::{Hash, Hasher, BuildHasherDefault};
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use rand::Rng;
use rand::os::OsRng;
use rustc_serialize::hex::{FromHex, FromHexError};
use bigint::{Uint, U256};
use libc::{c_void, memcmp};
/// Trait for a fixed-size byte array to be used as the output of hash functions.
pub trait FixedHash: Sized {
/// Create a new, zero-initialised, instance.
fn new() -> Self;
/// Synonym for `new()`. Prefer `new()`, as it's more readable.
fn zero() -> Self;
/// Create a new, cryptographically random, instance.
fn random() -> Self;
/// Assign self a cryptographically random value.
fn randomize(&mut self);
/// Get the size of this object in bytes.
fn len() -> usize;
/// Convert a slice of bytes of length `len()` to an instance of this type.
fn from_slice(src: &[u8]) -> Self;
/// Assign self to be of the same value as a slice of bytes of length `len()`.
fn clone_from_slice(&mut self, src: &[u8]) -> usize;
/// Copy the data of this object into some mutable slice of length `len()`.
fn copy_to(&self, dest: &mut [u8]);
/// Returns `true` if all bits set in `b` are also set in `self`.
fn contains<'a>(&'a self, b: &'a Self) -> bool;
/// Returns `true` if no bits are set.
fn is_zero(&self) -> bool;
/// Returns the lowest 8 bytes interpreted as a BigEndian integer.
fn low_u64(&self) -> u64;
}
/// Return `s` without the `0x` at the beginning of it, if any.
pub fn clean_0x(s: &str) -> &str {
if s.starts_with("0x") {
&s[2..]
} else {
s
}
}
macro_rules! impl_hash {
($from: ident, $size: expr) => {
#[repr(C)]
/// Unformatted binary data of fixed length.
pub struct $from (pub [u8; $size]);
impl From<[u8; $size]> for $from {
fn from(bytes: [u8; $size]) -> Self {
$from(bytes)
}
}
impl From<$from> for [u8; $size] {
fn from(s: $from) -> Self {
s.0
}
}
impl Deref for $from {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
&self.0
}
}
impl AsRef<[u8]> for $from {
#[inline]
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl DerefMut for $from {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl FixedHash for $from {
fn new() -> $from {
$from([0; $size])
}
fn zero() -> $from {
$from([0; $size])
}
fn random() -> $from {
let mut hash = $from::new();
hash.randomize();
hash
}
fn randomize(&mut self) {
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut self.0);
}
fn len() -> usize {
$size
}
#[inline]
fn clone_from_slice(&mut self, src: &[u8]) -> usize {
let min = cmp::min($size, src.len());
self.0[..min].copy_from_slice(&src[..min]);
min
}
fn from_slice(src: &[u8]) -> Self {
let mut r = Self::new();
r.clone_from_slice(src);
r
}
fn copy_to(&self, dest: &mut[u8]) {
let min = cmp::min($size, dest.len());
dest[..min].copy_from_slice(&self.0[..min]);
}
fn contains<'a>(&'a self, b: &'a Self) -> bool {
&(b & self) == b
}
fn is_zero(&self) -> bool {
self.eq(&Self::new())
}
fn low_u64(&self) -> u64 {
let mut ret = 0u64;
for i in 0..min($size, 8) {
ret |= (self.0[$size - 1 - i] as u64) << (i * 8);
}
ret
}
}
impl FromStr for $from {
type Err = FromHexError;
fn from_str(s: &str) -> Result<$from, FromHexError> {
let a = s.from_hex()?;
if a.len() != $size {
return Err(FromHexError::InvalidHexLength);
}
let mut ret = [0;$size];
ret.copy_from_slice(&a);
Ok($from(ret))
}
}
impl fmt::Debug for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[..] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl fmt::Display for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[0..2] {
write!(f, "{:02x}", i)?;
}
write!(f, "…")?;
for i in &self.0[$size - 2..$size] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl Copy for $from {}
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
impl Clone for $from {
fn clone(&self) -> $from {
let mut ret = $from::new();
ret.0.copy_from_slice(&self.0);
ret
}
}
impl Eq for $from {}
impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool {
unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) == 0 }
}
}
impl Ord for $from {
fn cmp(&self, other: &Self) -> Ordering {
let r = unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) };
if r < 0 { return Ordering::Less }
if r > 0 { return Ordering::Greater }
return Ordering::Equal;
}
}
impl PartialOrd for $from {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Hash for $from {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);
state.finish();
}
}
impl Index<usize> for $from {
type Output = u8;
fn index(&self, index: usize) -> &u8 {
&self.0[index]
}
}
impl IndexMut<usize> for $from {
fn index_mut(&mut self, index: usize) -> &mut u8 {
&mut self.0[index]
}
}
impl Index<ops::Range<usize>> for $from {
type Output = [u8];
fn index(&self, index: ops::Range<usize>) -> &[u8] {
&self.0[index]
}
}
impl IndexMut<ops::Range<usize>> for $from {
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [u8] {
&mut self.0[index]
}
}
impl Index<ops::RangeFull> for $from {
type Output = [u8];
fn index(&self, _index: ops::RangeFull) -> &[u8] {
&self.0
}
}
impl IndexMut<ops::RangeFull> for $from {
fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [u8] {
&mut self.0
}
}
/// `BitOr` on references
impl<'a> BitOr for &'a $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] | rhs.0[i];
}
ret
}
}
/// Moving `BitOr`
impl BitOr for $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
&self | &rhs
}
}
/// `BitAnd` on references
impl <'a> BitAnd for &'a $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] & rhs.0[i];
}
ret
}
}
/// Moving `BitAnd`
impl BitAnd for $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
&self & &rhs
}
}
/// `BitXor` on references
impl <'a> BitXor for &'a $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] ^ rhs.0[i];
}
ret
}
}
/// Moving `BitXor`
impl BitXor for $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
&self ^ &rhs
}
}
impl $from {
/// Get a hex representation.
pub fn hex(&self) -> String {
format!("{:?}", self)
}
}
impl Default for $from {
fn default() -> Self { $from::new() }
}
impl From<u64> for $from {
fn from(mut value: u64) -> $from {
let mut ret = $from::new();
for i in 0..8 {
if i < $size {
ret.0[$size - i - 1] = (value & 0xff) as u8;
value >>= 8;
}
}
ret
}
}
impl From<&'static str> for $from {
fn from(s: &'static str) -> $from {
let s = clean_0x(s);
if s.len() % 2 == 1 {
$from::from_str(&("0".to_owned() + s)).unwrap()
} else {
$from::from_str(s).unwrap()
}
}
}
impl<'a> From<&'a [u8]> for $from {
fn from(s: &'a [u8]) -> $from {
$from::from_slice(s)
}
}
}
}
impl From<U256> for H256 {
fn from(value: U256) -> H256 {
let mut ret = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl<'a> From<&'a U256> for H256 {
fn from(value: &'a U256) -> H256 {
let mut ret: H256 = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl From<H256> for U256 {
fn from(value: H256) -> U256 {
U256::from(&value)
}
}
impl<'a> From<&'a H256> for U256 {
fn from(value: &'a H256) -> U256 {
U256::from(value.as_ref() as &[u8])
}
}
impl From<H256> for H160 {
fn from(value: H256) -> H160 {
let mut ret = H160::new();
ret.0.copy_from_slice(&value[12..32]);
ret
}
}
impl From<H256> for H64 {
fn from(value: H256) -> H64 {
let mut ret = H64::new();
ret.0.copy_from_slice(&value[20..28]);
ret
}
}
impl From<H160> for H256 {
fn from(value: H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(&value);
ret
}
}
impl<'a> From<&'a H160> for H256 {
fn from(value: &'a H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(value);
ret
}
}
impl_hash!(H32, 4);
impl_hash!(H64, 8);
impl_hash!(H128, 16);
|
impl_hash!(H512, 64);
impl_hash!(H520, 65);
impl_hash!(H1024, 128);
impl_hash!(H2048, 256);
known_heap_size!(0, H32, H64, H128, H160, H256, H264, H512, H520, H1024, H2048);
// Specialized HashMap and HashSet
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
pub struct PlainHasher {
prefix: [u8; 8],
_marker: [u64; 0], // for alignment
}
impl Default for PlainHasher {
#[inline]
fn default() -> PlainHasher {
PlainHasher {
prefix: [0; 8],
_marker: [0; 0],
}
}
}
impl Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
unsafe { ::std::mem::transmute(self.prefix) }
}
#[inline]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
for quarter in bytes.chunks(8) {
for (x, y) in self.prefix.iter_mut().zip(quarter) {
*x ^= *y
}
}
}
}
/// Specialized version of `HashMap` with H256 keys and fast hashing function.
pub type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;
/// Specialized version of `HashSet` with H256 keys and fast hashing function.
pub type H256FastSet = HashSet<H256, BuildHasherDefault<PlainHasher>>;
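// Added sketch (illustrative): exercising the specialized map. PlainHasher's
// 32-byte assumption in `write` holds because the keys are H256.
#[cfg(test)]
mod fast_map_example {
    use super::*;

    #[test]
    fn insert_and_lookup() {
        let mut map: H256FastMap<u32> = H256FastMap::default();
        map.insert(H256::from(1u64), 42);
        assert_eq!(map.get(&H256::from(1u64)), Some(&42));
    }
}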
#[cfg(test)]
mod tests {
use hash::*;
use bigint::*;
use std::str::FromStr;
#[test]
fn hasher_alignment() {
use std::mem::align_of;
assert_eq!(align_of::<u64>(), align_of::<PlainHasher>());
}
#[test]
#[cfg_attr(feature="dev", allow(eq_op))]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
assert_eq!(format!("{}", h), "0123…cdef");
assert_eq!(format!("{:?}", h), "0123456789abcdef");
assert_eq!(h.hex(), "0123456789abcdef");
assert!(h == h);
assert!(h != H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xee]));
assert!(h != H64([0; 8]));
}
#[test]
fn hash_bitor() {
let a = H64([1; 8]);
let b = H64([2; 8]);
let c = H64([3; 8]);
// borrow
assert_eq!(&a | &b, c);
// move
assert_eq!(a | b, c);
}
#[test]
fn from_and_to_address() {
let address: H160 = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
let h = H256::from(address.clone());
let a = H160::from(h);
assert_eq!(address, a);
}
#[test]
fn from_u64() {
assert_eq!(H128::from(0x1234567890abcdef), H128::from_str("00000000000000001234567890abcdef").unwrap());
assert_eq!(H64::from(0x1234567890abcdef), H64::from_str("1234567890abcdef").unwrap());
assert_eq!(H32::from(0x1234567890abcdef), H32::from_str("90abcdef").unwrap());
}
#[test]
fn from_str() {
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
}
#[test]
fn from_and_to_u256() {
let u: U256 = 0x123456789abcdef0u64.into();
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
assert_eq!(h, h_ref);
let r_ref: U256 = From::from(&h);
assert_eq!(r_ref, u);
let r: U256 = From::from(h);
assert_eq!(r, u);
}
}
|
impl_hash!(H160, 20);
impl_hash!(H256, 32);
impl_hash!(H264, 33);
|
random_line_split
|
hash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General hash types, a fixed-size raw-data type used as the output of hash functions.
use std::{ops, fmt, cmp};
use std::cmp::{min, Ordering};
use std::ops::{Deref, DerefMut, BitXor, BitAnd, BitOr, IndexMut, Index};
use std::hash::{Hash, Hasher, BuildHasherDefault};
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use rand::Rng;
use rand::os::OsRng;
use rustc_serialize::hex::{FromHex, FromHexError};
use bigint::{Uint, U256};
use libc::{c_void, memcmp};
/// Trait for a fixed-size byte array to be used as the output of hash functions.
pub trait FixedHash: Sized {
/// Create a new, zero-initialised, instance.
fn new() -> Self;
/// Synonym for `new()`. Prefer this to `new()` as it's more readable.
fn zero() -> Self;
/// Create a new, cryptographically random, instance.
fn random() -> Self;
/// Assign self to have a cryptographically random value.
fn randomize(&mut self);
/// Get the size of this object in bytes.
fn len() -> usize;
/// Convert a slice of bytes of length `len()` to an instance of this type.
fn from_slice(src: &[u8]) -> Self;
/// Assign self to be of the same value as a slice of bytes of length `len()`.
fn clone_from_slice(&mut self, src: &[u8]) -> usize;
/// Copy the data of this object into some mutable slice of length `len()`.
fn copy_to(&self, dest: &mut [u8]);
/// Returns `true` if all bits set in `b` are also set in `self`.
fn contains<'a>(&'a self, b: &'a Self) -> bool;
/// Returns `true` if no bits are set.
fn is_zero(&self) -> bool;
/// Returns the lowest 8 bytes interpreted as a BigEndian integer.
fn low_u64(&self) -> u64;
}
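// Illustrative sketch (added; not in the upstream file): typical use of the
// trait once `impl_hash!` below has generated a concrete type.
//
//     let h = H256::random();                 // 32 cryptographically random bytes
//     assert_eq!(<H256 as FixedHash>::len(), 32);
//     assert!(H256::zero().is_zero());
//     assert_eq!(H64::from(0x0102u64).low_u64(), 0x0102); // low bytes, big-endian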
/// Return `s` without the `0x` at the beginning of it, if any.
pub fn clean_0x(s: &str) -> &str {
if s.starts_with("0x") {
&s[2..]
} else {
s
}
}
macro_rules! impl_hash {
($from: ident, $size: expr) => {
#[repr(C)]
/// Unformatted binary data of fixed length.
pub struct $from (pub [u8; $size]);
impl From<[u8; $size]> for $from {
fn from(bytes: [u8; $size]) -> Self {
$from(bytes)
}
}
impl From<$from> for [u8; $size] {
fn from(s: $from) -> Self {
s.0
}
}
impl Deref for $from {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
&self.0
}
}
impl AsRef<[u8]> for $from {
#[inline]
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl DerefMut for $from {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl FixedHash for $from {
fn new() -> $from {
$from([0; $size])
}
fn zero() -> $from {
$from([0; $size])
}
fn random() -> $from {
let mut hash = $from::new();
hash.randomize();
hash
}
fn randomize(&mut self) {
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut self.0);
}
fn len() -> usize {
$size
}
#[inline]
fn clone_from_slice(&mut self, src: &[u8]) -> usize {
let min = cmp::min($size, src.len());
self.0[..min].copy_from_slice(&src[..min]);
min
}
fn from_slice(src: &[u8]) -> Self {
let mut r = Self::new();
r.clone_from_slice(src);
r
}
fn copy_to(&self, dest: &mut [u8]) {
let min = cmp::min($size, dest.len());
dest[..min].copy_from_slice(&self.0[..min]);
}
fn contains<'a>(&'a self, b: &'a Self) -> bool {
&(b & self) == b
}
fn is_zero(&self) -> bool {
self.eq(&Self::new())
}
fn low_u64(&self) -> u64 {
let mut ret = 0u64;
for i in 0..min($size, 8) {
ret |= (self.0[$size - 1 - i] as u64) << (i * 8);
}
ret
}
}
impl FromStr for $from {
type Err = FromHexError;
fn from_str(s: &str) -> Result<$from, FromHexError> {
let a = s.from_hex()?;
if a.len() != $size {
return Err(FromHexError::InvalidHexLength);
}
let mut ret = [0;$size];
ret.copy_from_slice(&a);
Ok($from(ret))
}
}
impl fmt::Debug for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[..] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl fmt::Display for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[0..2] {
write!(f, "{:02x}", i)?;
}
write!(f, "…")?;
for i in &self.0[$size - 2..$size] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl Copy for $from {}
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
impl Clone for $from {
fn clone(&self) -> $from {
let mut ret = $from::new();
ret.0.copy_from_slice(&self.0);
ret
}
}
impl Eq for $from {}
impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool {
unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) == 0 }
}
}
impl Ord for $from {
fn cmp(&self, other: &Self) -> Ordering {
let r = unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) };
if r < 0 { return Ordering::Less }
if r > 0 { return Ordering::Greater }
return Ordering::Equal;
}
}
impl PartialOrd for $from {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Hash for $from {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);
state.finish();
}
}
impl Index<usize> for $from {
type Output = u8;
fn index(&self, index: usize) -> &u8 {
&self.0[index]
}
}
impl IndexMut<usize> for $from {
fn index_mut(&mut self, index: usize) -> &mut u8 {
&mut self.0[index]
}
}
impl Index<ops::Range<usize>> for $from {
type Output = [u8];
fn index(&self, index: ops::Range<usize>) -> &[u8] {
&self.0[index]
}
}
impl IndexMut<ops::Range<usize>> for $from {
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [u8] {
&mut self.0[index]
}
}
impl Index<ops::RangeFull> for $from {
type Output = [u8];
fn index(&self, _index: ops::RangeFull) -> &[u8] {
&self.0
}
}
impl IndexMut<ops::RangeFull> for $from {
fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [u8] {
&mut self.0
}
}
/// `BitOr` on references
impl<'a> BitOr for &'a $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] | rhs.0[i];
}
ret
}
}
/// Moving `BitOr`
impl BitOr for $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
&self | &rhs
}
}
/// `BitAnd` on references
impl <'a> BitAnd for &'a $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] & rhs.0[i];
}
ret
}
}
/// Moving `BitAnd`
impl BitAnd for $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
&self & &rhs
}
}
/// `BitXor` on references
impl <'a> BitXor for &'a $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] ^ rhs.0[i];
}
ret
}
}
/// Moving `BitXor`
impl BitXor for $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
&self ^ &rhs
}
}
impl $from {
/// Get a hex representation.
pub fn hex(&self) -> String {
format!("{:?}", self)
}
}
impl Default for $from {
fn default() -> Self { $from::new() }
}
impl From<u64> for $from {
fn from(mut value: u64) -> $from {
let mut ret = $from::new();
for i in 0..8 {
if i < $size {
ret.0[$size - i - 1] = (value & 0xff) as u8;
value >>= 8;
}
}
ret
}
}
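// Byte-layout note (added comment): the loop above writes `value` big-endian
// into the low end of the array, so for example
// H64::from(0x0123456789abcdefu64) == H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]).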
impl From<&'static str> for $from {
fn from(s: &'static str) -> $from {
let s = clean_0x(s);
if s.len() % 2 == 1 {
$from::from_str(&("0".to_owned() + s)).unwrap()
} else {
$from::from_str(s).unwrap()
}
}
}
impl<'a> From<&'a [u8]> for $from {
fn from(s: &'a [u8]) -> $from {
$from::from_slice(s)
}
}
}
}
impl From<U256> for H256 {
fn from(value: U256) -> H256 {
let mut ret = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl<'a> From<&'a U256> for H256 {
fn from(value: &'a U256) -> H256 {
|
impl From<H256> for U256 {
fn from(value: H256) -> U256 {
U256::from(&value)
}
}
impl<'a> From<&'a H256> for U256 {
fn from(value: &'a H256) -> U256 {
U256::from(value.as_ref() as &[u8])
}
}
impl From<H256> for H160 {
fn from(value: H256) -> H160 {
let mut ret = H160::new();
ret.0.copy_from_slice(&value[12..32]);
ret
}
}
impl From<H256> for H64 {
fn from(value: H256) -> H64 {
let mut ret = H64::new();
ret.0.copy_from_slice(&value[20..28]);
ret
}
}
impl From<H160> for H256 {
fn from(value: H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(&value);
ret
}
}
impl<'a> From<&'a H160> for H256 {
fn from(value: &'a H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(value);
ret
}
}
impl_hash!(H32, 4);
impl_hash!(H64, 8);
impl_hash!(H128, 16);
impl_hash!(H160, 20);
impl_hash!(H256, 32);
impl_hash!(H264, 33);
impl_hash!(H512, 64);
impl_hash!(H520, 65);
impl_hash!(H1024, 128);
impl_hash!(H2048, 256);
known_heap_size!(0, H32, H64, H128, H160, H256, H264, H512, H520, H1024, H2048);
// Specialized HashMap and HashSet
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
pub struct PlainHasher {
prefix: [u8; 8],
_marker: [u64; 0], // for alignment
}
impl Default for PlainHasher {
#[inline]
fn default() -> PlainHasher {
PlainHasher {
prefix: [0; 8],
_marker: [0; 0],
}
}
}
impl Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
unsafe { ::std::mem::transmute(self.prefix) }
}
#[inline]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
for quarter in bytes.chunks(8) {
for (x, y) in self.prefix.iter_mut().zip(quarter) {
*x ^= *y
}
}
}
}
/// Specialized version of `HashMap` with H256 keys and fast hashing function.
pub type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;
/// Specialized version of `HashSet` with H256 keys and fast hashing function.
pub type H256FastSet = HashSet<H256, BuildHasherDefault<PlainHasher>>;
#[cfg(test)]
mod tests {
use hash::*;
use bigint::*;
use std::str::FromStr;
#[test]
fn hasher_alignment() {
use std::mem::align_of;
assert_eq!(align_of::<u64>(), align_of::<PlainHasher>());
}
#[test]
#[cfg_attr(feature="dev", allow(eq_op))]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
assert_eq!(format!("{}", h), "0123…cdef");
assert_eq!(format!("{:?}", h), "0123456789abcdef");
assert_eq!(h.hex(), "0123456789abcdef");
assert!(h == h);
assert!(h != H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xee]));
assert!(h != H64([0; 8]));
}
#[test]
fn hash_bitor() {
let a = H64([1; 8]);
let b = H64([2; 8]);
let c = H64([3; 8]);
// borrow
assert_eq!(&a | &b, c);
// move
assert_eq!(a | b, c);
}
#[test]
fn from_and_to_address() {
let address: H160 = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
let h = H256::from(address.clone());
let a = H160::from(h);
assert_eq!(address, a);
}
#[test]
fn from_u64() {
assert_eq!(H128::from(0x1234567890abcdef), H128::from_str("00000000000000001234567890abcdef").unwrap());
assert_eq!(H64::from(0x1234567890abcdef), H64::from_str("1234567890abcdef").unwrap());
assert_eq!(H32::from(0x1234567890abcdef), H32::from_str("90abcdef").unwrap());
}
#[test]
fn from_str() {
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
}
#[test]
fn from_and_to_u256() {
let u: U256 = 0x123456789abcdef0u64.into();
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
assert_eq!(h, h_ref);
let r_ref: U256 = From::from(&h);
assert_eq!(r_ref, u);
let r: U256 = From::from(h);
assert_eq!(r, u);
}
}
|
let mut ret: H256 = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
|
identifier_body
|
hash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General hash types, a fixed-size raw-data type used as the output of hash functions.
use std::{ops, fmt, cmp};
use std::cmp::{min, Ordering};
use std::ops::{Deref, DerefMut, BitXor, BitAnd, BitOr, IndexMut, Index};
use std::hash::{Hash, Hasher, BuildHasherDefault};
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use rand::Rng;
use rand::os::OsRng;
use rustc_serialize::hex::{FromHex, FromHexError};
use bigint::{Uint, U256};
use libc::{c_void, memcmp};
/// Trait for a fixed-size byte array to be used as the output of hash functions.
pub trait FixedHash: Sized {
/// Create a new, zero-initialised, instance.
fn new() -> Self;
/// Synonym for `new()`. Prefer this to `new()` as it's more readable.
fn zero() -> Self;
/// Create a new, cryptographically random, instance.
fn random() -> Self;
/// Assign self to have a cryptographically random value.
fn randomize(&mut self);
/// Get the size of this object in bytes.
fn len() -> usize;
/// Convert a slice of bytes of length `len()` to an instance of this type.
fn from_slice(src: &[u8]) -> Self;
/// Assign self to be of the same value as a slice of bytes of length `len()`.
fn clone_from_slice(&mut self, src: &[u8]) -> usize;
/// Copy the data of this object into some mutable slice of length `len()`.
fn copy_to(&self, dest: &mut [u8]);
/// Returns `true` if all bits set in `b` are also set in `self`.
fn contains<'a>(&'a self, b: &'a Self) -> bool;
/// Returns `true` if no bits are set.
fn is_zero(&self) -> bool;
/// Returns the lowest 8 bytes interpreted as a BigEndian integer.
fn low_u64(&self) -> u64;
}
/// Return `s` without the `0x` at the beginning of it, if any.
pub fn clean_0x(s: &str) -> &str {
if s.starts_with("0x") {
&s[2..]
} else
|
}
macro_rules! impl_hash {
($from: ident, $size: expr) => {
#[repr(C)]
/// Unformatted binary data of fixed length.
pub struct $from (pub [u8; $size]);
impl From<[u8; $size]> for $from {
fn from(bytes: [u8; $size]) -> Self {
$from(bytes)
}
}
impl From<$from> for [u8; $size] {
fn from(s: $from) -> Self {
s.0
}
}
impl Deref for $from {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
&self.0
}
}
impl AsRef<[u8]> for $from {
#[inline]
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl DerefMut for $from {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl FixedHash for $from {
fn new() -> $from {
$from([0; $size])
}
fn zero() -> $from {
$from([0; $size])
}
fn random() -> $from {
let mut hash = $from::new();
hash.randomize();
hash
}
fn randomize(&mut self) {
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut self.0);
}
fn len() -> usize {
$size
}
#[inline]
fn clone_from_slice(&mut self, src: &[u8]) -> usize {
let min = cmp::min($size, src.len());
self.0[..min].copy_from_slice(&src[..min]);
min
}
fn from_slice(src: &[u8]) -> Self {
let mut r = Self::new();
r.clone_from_slice(src);
r
}
fn copy_to(&self, dest: &mut [u8]) {
let min = cmp::min($size, dest.len());
dest[..min].copy_from_slice(&self.0[..min]);
}
fn contains<'a>(&'a self, b: &'a Self) -> bool {
&(b & self) == b
}
fn is_zero(&self) -> bool {
self.eq(&Self::new())
}
fn low_u64(&self) -> u64 {
let mut ret = 0u64;
for i in 0..min($size, 8) {
ret |= (self.0[$size - 1 - i] as u64) << (i * 8);
}
ret
}
}
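// Added note: `contains` above is defined via the `BitAnd` impl generated
// later in this macro — `a.contains(&b)` holds iff `&(b & a) == b`, i.e.
// every bit set in `b` is also set in `a`. E.g. for H32,
// H32([0xff, 0, 0, 0]).contains(&H32([0x0f, 0, 0, 0])) is true.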
impl FromStr for $from {
type Err = FromHexError;
fn from_str(s: &str) -> Result<$from, FromHexError> {
let a = s.from_hex()?;
if a.len() != $size {
return Err(FromHexError::InvalidHexLength);
}
let mut ret = [0;$size];
ret.copy_from_slice(&a);
Ok($from(ret))
}
}
impl fmt::Debug for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[..] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl fmt::Display for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in &self.0[0..2] {
write!(f, "{:02x}", i)?;
}
write!(f, "…")?;
for i in &self.0[$size - 2..$size] {
write!(f, "{:02x}", i)?;
}
Ok(())
}
}
impl Copy for $from {}
#[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
impl Clone for $from {
fn clone(&self) -> $from {
let mut ret = $from::new();
ret.0.copy_from_slice(&self.0);
ret
}
}
impl Eq for $from {}
impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool {
unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) == 0 }
}
}
impl Ord for $from {
fn cmp(&self, other: &Self) -> Ordering {
let r = unsafe { memcmp(self.0.as_ptr() as *const c_void, other.0.as_ptr() as *const c_void, $size) };
if r < 0 { return Ordering::Less }
if r > 0 { return Ordering::Greater }
return Ordering::Equal;
}
}
impl PartialOrd for $from {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Hash for $from {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);
state.finish();
}
}
impl Index<usize> for $from {
type Output = u8;
fn index(&self, index: usize) -> &u8 {
&self.0[index]
}
}
impl IndexMut<usize> for $from {
fn index_mut(&mut self, index: usize) -> &mut u8 {
&mut self.0[index]
}
}
impl Index<ops::Range<usize>> for $from {
type Output = [u8];
fn index(&self, index: ops::Range<usize>) -> &[u8] {
&self.0[index]
}
}
impl IndexMut<ops::Range<usize>> for $from {
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [u8] {
&mut self.0[index]
}
}
impl Index<ops::RangeFull> for $from {
type Output = [u8];
fn index(&self, _index: ops::RangeFull) -> &[u8] {
&self.0
}
}
impl IndexMut<ops::RangeFull> for $from {
fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [u8] {
&mut self.0
}
}
/// `BitOr` on references
impl<'a> BitOr for &'a $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] | rhs.0[i];
}
ret
}
}
/// Moving `BitOr`
impl BitOr for $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
&self | &rhs
}
}
/// `BitAnd` on references
impl <'a> BitAnd for &'a $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] & rhs.0[i];
}
ret
}
}
/// Moving `BitAnd`
impl BitAnd for $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
&self & &rhs
}
}
/// `BitXor` on references
impl <'a> BitXor for &'a $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
let mut ret: $from = $from::default();
for i in 0..$size {
ret.0[i] = self.0[i] ^ rhs.0[i];
}
ret
}
}
/// Moving `BitXor`
impl BitXor for $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
&self ^ &rhs
}
}
impl $from {
/// Get a hex representation.
pub fn hex(&self) -> String {
format!("{:?}", self)
}
}
impl Default for $from {
fn default() -> Self { $from::new() }
}
impl From<u64> for $from {
fn from(mut value: u64) -> $from {
let mut ret = $from::new();
for i in 0..8 {
if i < $size {
ret.0[$size - i - 1] = (value & 0xff) as u8;
value >>= 8;
}
}
ret
}
}
impl From<&'static str> for $from {
fn from(s: &'static str) -> $from {
let s = clean_0x(s);
if s.len() % 2 == 1 {
$from::from_str(&("0".to_owned() + s)).unwrap()
} else {
$from::from_str(s).unwrap()
}
}
}
impl<'a> From<&'a [u8]> for $from {
fn from(s: &'a [u8]) -> $from {
$from::from_slice(s)
}
}
}
}
impl From<U256> for H256 {
fn from(value: U256) -> H256 {
let mut ret = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl<'a> From<&'a U256> for H256 {
fn from(value: &'a U256) -> H256 {
let mut ret: H256 = H256::new();
value.to_big_endian(&mut ret);
ret
}
}
impl From<H256> for U256 {
fn from(value: H256) -> U256 {
U256::from(&value)
}
}
impl<'a> From<&'a H256> for U256 {
fn from(value: &'a H256) -> U256 {
U256::from(value.as_ref() as &[u8])
}
}
impl From<H256> for H160 {
fn from(value: H256) -> H160 {
let mut ret = H160::new();
ret.0.copy_from_slice(&value[12..32]);
ret
}
}
impl From<H256> for H64 {
fn from(value: H256) -> H64 {
let mut ret = H64::new();
ret.0.copy_from_slice(&value[20..28]);
ret
}
}
impl From<H160> for H256 {
fn from(value: H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(&value);
ret
}
}
impl<'a> From<&'a H160> for H256 {
fn from(value: &'a H160) -> H256 {
let mut ret = H256::new();
ret.0[12..32].copy_from_slice(value);
ret
}
}
impl_hash!(H32, 4);
impl_hash!(H64, 8);
impl_hash!(H128, 16);
impl_hash!(H160, 20);
impl_hash!(H256, 32);
impl_hash!(H264, 33);
impl_hash!(H512, 64);
impl_hash!(H520, 65);
impl_hash!(H1024, 128);
impl_hash!(H2048, 256);
known_heap_size!(0, H32, H64, H128, H160, H256, H264, H512, H520, H1024, H2048);
// Specialized HashMap and HashSet
/// Hasher that just takes 8 bytes of the provided value.
/// May only be used for keys which are 32 bytes.
pub struct PlainHasher {
prefix: [u8; 8],
_marker: [u64; 0], // for alignment
}
impl Default for PlainHasher {
#[inline]
fn default() -> PlainHasher {
PlainHasher {
prefix: [0; 8],
_marker: [0; 0],
}
}
}
impl Hasher for PlainHasher {
#[inline]
fn finish(&self) -> u64 {
unsafe { ::std::mem::transmute(self.prefix) }
}
#[inline]
fn write(&mut self, bytes: &[u8]) {
debug_assert!(bytes.len() == 32);
for quarter in bytes.chunks(8) {
for (x, y) in self.prefix.iter_mut().zip(quarter) {
*x ^= *y
}
}
}
}
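// Added note: `write` folds the 32-byte key into `prefix` by XORing its four
// 8-byte quarters together, and `finish` reinterprets those bytes as a u64.
// No further mixing is needed because H256 keys are themselves uniformly
// distributed hash output.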
/// Specialized version of `HashMap` with H256 keys and fast hashing function.
pub type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;
/// Specialized version of `HashSet` with H256 keys and fast hashing function.
pub type H256FastSet = HashSet<H256, BuildHasherDefault<PlainHasher>>;
#[cfg(test)]
mod tests {
use hash::*;
use bigint::*;
use std::str::FromStr;
#[test]
fn hasher_alignment() {
use std::mem::align_of;
assert_eq!(align_of::<u64>(), align_of::<PlainHasher>());
}
#[test]
#[cfg_attr(feature="dev", allow(eq_op))]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
assert_eq!(format!("{}", h), "0123…cdef");
assert_eq!(format!("{:?}", h), "0123456789abcdef");
assert_eq!(h.hex(), "0123456789abcdef");
assert!(h == h);
assert!(h != H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xee]));
assert!(h != H64([0; 8]));
}
#[test]
fn hash_bitor() {
let a = H64([1; 8]);
let b = H64([2; 8]);
let c = H64([3; 8]);
// borrow
assert_eq!(&a | &b, c);
// move
assert_eq!(a | b, c);
}
#[test]
fn from_and_to_address() {
let address: H160 = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
let h = H256::from(address.clone());
let a = H160::from(h);
assert_eq!(address, a);
}
#[test]
fn from_u64() {
assert_eq!(H128::from(0x1234567890abcdef), H128::from_str("00000000000000001234567890abcdef").unwrap());
assert_eq!(H64::from(0x1234567890abcdef), H64::from_str("1234567890abcdef").unwrap());
assert_eq!(H32::from(0x1234567890abcdef), H32::from_str("90abcdef").unwrap());
}
#[test]
fn from_str() {
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
}
#[test]
fn from_and_to_u256() {
let u: U256 = 0x123456789abcdef0u64.into();
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
assert_eq!(h, h_ref);
let r_ref: U256 = From::from(&h);
assert_eq!(r_ref, u);
let r: U256 = From::from(h);
assert_eq!(r, u);
}
}
|
{
s
}
|
conditional_block
|
lib.rs
|
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! An async-compatible HTTP client built on top of libcurl.
#![allow(dead_code)]
mod client;
mod driver;
mod errors;
mod event_listeners;
mod handler;
mod header;
mod pool;
mod progress;
mod receiver;
mod request;
mod response;
mod stats;
mod stream;
pub use client::Config;
pub use client::HttpClient;
pub use client::ResponseFuture;
pub use client::StatsFuture;
pub use curl::easy::HttpVersion;
pub use errors::Abort;
pub use errors::HttpClientError;
pub use errors::TlsError;
pub use errors::TlsErrorKind;
pub use header::Header;
pub use progress::Progress;
pub use receiver::Receiver;
pub use request::Encoding;
pub use request::Method;
pub use request::MinTransferSpeed;
pub use request::Request;
pub use request::RequestContext;
pub use request::RequestInfo;
pub use request::StreamRequest;
pub use response::AsyncBody;
pub use response::AsyncResponse;
pub use response::Response;
pub use stats::Stats;
pub use stream::BufferedStream;
pub use stream::CborStream;
|
/*
|
random_line_split
|
|
echo-udp.rs
|
//! A UDP echo server that just sends back everything it receives.
//!
//! If you're on unix you can test this out by executing in one terminal:
//!
//! cargo run --example echo-udp
//!
//! and in another terminal you can run:
//!
//! cargo run --example connect -- --udp 127.0.0.1:8080
//!
//! Each line you type into the `connect` terminal should be echoed back to you!
extern crate futures;
#[macro_use]
extern crate tokio;
#[macro_use]
extern crate tokio_io;
use std::{env, io};
use std::net::SocketAddr;
use futures::{Future, Poll};
use tokio::net::UdpSocket;
use tokio::reactor::Core;
struct Server {
socket: UdpSocket,
buf: Vec<u8>,
to_send: Option<(usize, SocketAddr)>,
}
impl Future for Server {
type Item = ();
type Error = io::Error;
fn poll(&mut self) -> Poll<(), io::Error> {
loop {
// First we check to see if there's a message we need to echo back.
// If so then we try to send it back to the original source, waiting
// until it's writable and we're able to do so.
if let Some((size, peer)) = self.to_send
|
// If we're here then `to_send` is `None`, so we take a look for the
// next message we're going to echo back.
self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf)));
}
}
}
fn main() {
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server, and also bind the
// socket we'll be listening to.
let mut l = Core::new().unwrap();
let handle = l.handle();
let socket = UdpSocket::bind(&addr, &handle).unwrap();
println!("Listening on: {}", socket.local_addr().unwrap());
// Next we'll create a future to spawn (the one we defined above) and then
// we'll run the event loop by running the future.
l.run(Server {
socket: socket,
buf: vec![0; 1024],
to_send: None,
}).unwrap();
}
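// Usage sketch (assumed commands, mirroring the module docs): in one shell
//     cargo run --example echo-udp 127.0.0.1:8080
// and in another
//     echo hello | nc -u 127.0.0.1 8080
// Every datagram received is echoed back by the poll loop above.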
|
{
let amt = try_nb!(self.socket.send_to(&self.buf[..size], &peer));
println!("Echoed {}/{} bytes to {}", amt, size, peer);
self.to_send = None;
}
|
conditional_block
|
echo-udp.rs
|
//! A UDP echo server that just sends back everything it receives.
//!
//! If you're on unix you can test this out by executing in one terminal:
//!
//! cargo run --example echo-udp
//!
//! and in another terminal you can run:
//!
//! cargo run --example connect -- --udp 127.0.0.1:8080
//!
//! Each line you type into the `connect` terminal should be echoed back to you!
extern crate futures;
#[macro_use]
extern crate tokio;
#[macro_use]
extern crate tokio_io;
use std::{env, io};
use std::net::SocketAddr;
use futures::{Future, Poll};
use tokio::net::UdpSocket;
use tokio::reactor::Core;
struct Server {
socket: UdpSocket,
buf: Vec<u8>,
to_send: Option<(usize, SocketAddr)>,
}
impl Future for Server {
type Item = ();
type Error = io::Error;
fn
|
(&mut self) -> Poll<(), io::Error> {
loop {
// First we check to see if there's a message we need to echo back.
// If so then we try to send it back to the original source, waiting
// until it's writable and we're able to do so.
if let Some((size, peer)) = self.to_send {
let amt = try_nb!(self.socket.send_to(&self.buf[..size], &peer));
println!("Echoed {}/{} bytes to {}", amt, size, peer);
self.to_send = None;
}
// If we're here then `to_send` is `None`, so we take a look for the
// next message we're going to echo back.
self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf)));
}
}
}
fn main() {
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server, and also bind the
// socket we'll be listening to.
let mut l = Core::new().unwrap();
let handle = l.handle();
let socket = UdpSocket::bind(&addr, &handle).unwrap();
println!("Listening on: {}", socket.local_addr().unwrap());
// Next we'll create a future to spawn (the one we defined above) and then
// we'll run the event loop by running the future.
l.run(Server {
socket: socket,
buf: vec![0; 1024],
to_send: None,
}).unwrap();
}
|
poll
|
identifier_name
|
echo-udp.rs
|
//! A UDP echo server that just sends back everything it receives.
|
//! cargo run --example echo-udp
//!
//! and in another terminal you can run:
//!
//! cargo run --example connect -- --udp 127.0.0.1:8080
//!
//! Each line you type into the `connect` terminal should be echoed back to you!
extern crate futures;
#[macro_use]
extern crate tokio;
#[macro_use]
extern crate tokio_io;
use std::{env, io};
use std::net::SocketAddr;
use futures::{Future, Poll};
use tokio::net::UdpSocket;
use tokio::reactor::Core;
struct Server {
socket: UdpSocket,
buf: Vec<u8>,
to_send: Option<(usize, SocketAddr)>,
}
impl Future for Server {
type Item = ();
type Error = io::Error;
fn poll(&mut self) -> Poll<(), io::Error> {
loop {
// First we check to see if there's a message we need to echo back.
// If so then we try to send it back to the original source, waiting
// until it's writable and we're able to do so.
if let Some((size, peer)) = self.to_send {
let amt = try_nb!(self.socket.send_to(&self.buf[..size], &peer));
println!("Echoed {}/{} bytes to {}", amt, size, peer);
self.to_send = None;
}
// If we're here then `to_send` is `None`, so we take a look for the
// next message we're going to echo back.
self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf)));
}
}
}
fn main() {
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server, and also bind the
// socket we'll be listening to.
let mut l = Core::new().unwrap();
let handle = l.handle();
let socket = UdpSocket::bind(&addr, &handle).unwrap();
println!("Listening on: {}", socket.local_addr().unwrap());
// Next we'll create a future to spawn (the one we defined above) and then
// we'll run the event loop by running the future.
l.run(Server {
socket: socket,
buf: vec![0; 1024],
to_send: None,
}).unwrap();
}
|
//!
//! If you're on unix you can test this out by executing in one terminal:
//!
|
random_line_split
|
list_issues.rs
|
extern crate gitlab_api as gitlab;
use std::env;
#[macro_use]
extern crate log;
extern crate env_logger;
use gitlab::GitLab;
// use gitlab::Pagination;
use gitlab::issues;
use gitlab::Lister;
use gitlab::errors::*;
fn main() {
if let Err(ref e) = run() {
println!("error: {}", e);
for e in e.iter().skip(1) {
println!("caused by: {}", e);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
println!("backtrace: {:?}", backtrace);
}
::std::process::exit(1);
}
}
fn run() -> Result<()>
|
}
};
let gl = GitLab::new(&hostname, &token).chain_err(|| "failure to create GitLab instance")?;
// let gl = GitLab::new(&hostname, &token)
// .chain_err(|| "failure to create GitLab instance")?
// .scheme("http").port(80);
// let gl = gl.scheme("http").port(80);
let issues = gl.issues().list().chain_err(|| "cannot get issues")?;
println!("issues: {:?}", issues);
let opened_issues =
gl.issues().state(issues::State::Opened).list().chain_err(|| "cannot get issues")?;
println!("opened_issues: {:?}", opened_issues);
let closed_issues =
gl.issues().state(issues::State::Closed).list().chain_err(|| "cannot get issues")?;
println!("closed_issues: {:?}", closed_issues);
let issue = gl.issues().single(142, 739).list().chain_err(|| "cannot get issues")?;
println!("issue: {:?}", issue);
let group_issues = gl.issues()
.group(21)
.state(issues::State::Closed)
.list()
.chain_err(|| "cannot get issues")?;
println!("group_issues: {:?}", group_issues);
let project_issues = gl.issues()
.project(142)
.state(issues::State::Opened)
.list()
.chain_err(|| "cannot get issues")?;
println!("project_issues: {:?}", project_issues);
Ok(())
}
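// Usage sketch (assumed invocation): set both environment variables first,
//     GITLAB_HOSTNAME=gitlab.com GITLAB_TOKEN=<token> cargo run --example list_issues
// otherwise `run()` falls back to the default hostname and panics on a
// missing token.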
|
{
env_logger::init().unwrap();
info!("starting up");
let hostname = match env::var("GITLAB_HOSTNAME") {
Ok(val) => val,
Err(_) => {
let default = String::from("gitlab.com");
println!("Please set environment variable 'GITLAB_HOSTNAME'. Using default '{}'.",
default);
default
}
};
let token = match env::var("GITLAB_TOKEN") {
Ok(val) => val,
Err(_) => {
panic!("Please set environment variable 'GITLAB_TOKEN'. Take it from \
http://{}/profile/account",
hostname);
|
identifier_body
|
list_issues.rs
|
extern crate gitlab_api as gitlab;
use std::env;
#[macro_use]
extern crate log;
extern crate env_logger;
use gitlab::GitLab;
// use gitlab::Pagination;
use gitlab::issues;
use gitlab::Lister;
use gitlab::errors::*;
fn
|
() {
if let Err(ref e) = run() {
println!("error: {}", e);
for e in e.iter().skip(1) {
println!("caused by: {}", e);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
println!("backtrace: {:?}", backtrace);
}
::std::process::exit(1);
}
}
fn run() -> Result<()> {
env_logger::init().unwrap();
info!("starting up");
let hostname = match env::var("GITLAB_HOSTNAME") {
Ok(val) => val,
Err(_) => {
let default = String::from("gitlab.com");
println!("Please set environment variable 'GITLAB_HOSTNAME'. Using default '{}'.",
default);
default
}
};
let token = match env::var("GITLAB_TOKEN") {
Ok(val) => val,
Err(_) => {
panic!("Please set environment variable 'GITLAB_TOKEN'. Take it from \
http://{}/profile/account",
hostname);
}
};
let gl = GitLab::new(&hostname, &token).chain_err(|| "failure to create GitLab instance")?;
// let gl = GitLab::new(&hostname, &token)
// .chain_err(|| "failure to create GitLab instance")?
// .scheme("http").port(80);
// let gl = gl.scheme("http").port(80);
let issues = gl.issues().list().chain_err(|| "cannot get issues")?;
println!("issues: {:?}", issues);
let opened_issues =
gl.issues().state(issues::State::Opened).list().chain_err(|| "cannot get issues")?;
println!("opened_issues: {:?}", opened_issues);
let closed_issues =
gl.issues().state(issues::State::Closed).list().chain_err(|| "cannot get issues")?;
println!("closed_issues: {:?}", closed_issues);
let issue = gl.issues().single(142, 739).list().chain_err(|| "cannot get issues")?;
println!("issue: {:?}", issue);
let group_issues = gl.issues()
.group(21)
.state(issues::State::Closed)
.list()
.chain_err(|| "cannot get issues")?;
println!("group_issues: {:?}", group_issues);
let project_issues = gl.issues()
.project(142)
.state(issues::State::Opened)
.list()
.chain_err(|| "cannot get issues")?;
println!("project_issues: {:?}", project_issues);
Ok(())
}
|
main
|
identifier_name
|
list_issues.rs
|
extern crate gitlab_api as gitlab;
use std::env;
#[macro_use]
extern crate log;
extern crate env_logger;
use gitlab::GitLab;
// use gitlab::Pagination;
use gitlab::issues;
use gitlab::Lister;
use gitlab::errors::*;
fn main() {
if let Err(ref e) = run() {
println!("error: {}", e);
for e in e.iter().skip(1) {
println!("caused by: {}", e);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
println!("backtrace: {:?}", backtrace);
}
::std::process::exit(1);
}
}
fn run() -> Result<()> {
env_logger::init().unwrap();
|
let hostname = match env::var("GITLAB_HOSTNAME") {
Ok(val) => val,
Err(_) => {
let default = String::from("gitlab.com");
println!("Please set environment variable 'GITLAB_HOSTNAME'. Using default '{}'.",
default);
default
}
};
let token = match env::var("GITLAB_TOKEN") {
Ok(val) => val,
Err(_) => {
panic!("Please set environment variable 'GITLAB_TOKEN'. Take it from \
http://{}/profile/account",
hostname);
}
};
let gl = GitLab::new(&hostname, &token).chain_err(|| "failure to create GitLab instance")?;
// let gl = GitLab::new(&hostname, &token)
// .chain_err(|| "failure to create GitLab instance")?
// .scheme("http").port(80);
// let gl = gl.scheme("http").port(80);
let issues = gl.issues().list().chain_err(|| "cannot get issues")?;
println!("issues: {:?}", issues);
let opened_issues =
gl.issues().state(issues::State::Opened).list().chain_err(|| "cannot get issues")?;
println!("opened_issues: {:?}", opened_issues);
let closed_issues =
gl.issues().state(issues::State::Closed).list().chain_err(|| "cannot get issues")?;
println!("closed_issues: {:?}", closed_issues);
let issue = gl.issues().single(142, 739).list().chain_err(|| "cannot get issues")?;
println!("issue: {:?}", issue);
let group_issues = gl.issues()
.group(21)
.state(issues::State::Closed)
.list()
.chain_err(|| "cannot get issues")?;
println!("group_issues: {:?}", group_issues);
let project_issues = gl.issues()
.project(142)
.state(issues::State::Opened)
.list()
.chain_err(|| "cannot get issues")?;
println!("project_issues: {:?}", project_issues);
Ok(())
}
|
info!("starting up");
|
random_line_split
|
table.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::box_::Box;
use layout::block::BlockFlow;
use layout::block::{WidthAndMarginsComputer, WidthConstraintInput, WidthConstraintSolution};
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::floats::{FloatKind};
use layout::flow::{TableFlowClass, FlowClass, Flow, ImmutableFlowUtils};
use layout::flow;
use layout::table_wrapper::{TableLayout, FixedLayout, AutoLayout};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use style::computed_values::table_layout;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table flow corresponds to the table's internal table box under a table wrapper flow.
/// The properties `position`, `float`, and `margin-*` are used on the table wrapper box,
/// not on the table box, per CSS 2.1 § 10.5.
pub struct TableFlow {
block_flow: BlockFlow,
/// Column widths
col_widths: ~[Au],
/// Table-layout property
table_layout: TableLayout,
}
impl TableFlow {
pub fn from_node_and_box(node: &ThreadSafeLayoutNode,
box_: Box)
-> TableFlow {
let mut block_flow = BlockFlow::from_node_and_box(node, box_);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableFlow {
let mut block_flow = BlockFlow::from_node(constructor, node);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn float_from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode,
float_kind: FloatKind)
-> TableFlow {
let mut block_flow = BlockFlow::float_from_node(constructor, node, float_kind);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
|
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
self.col_widths = ~[];
}
/// Assign height for table flow.
///
/// inline(always) because this is only ever called by in-order or non-in-order top-level
/// methods
#[inline(always)]
fn assign_height_table_base(&mut self, ctx: &mut LayoutContext, inorder: bool) {
let (_, top_offset, bottom_offset, left_offset) = self.block_flow.initialize_offsets(true);
self.block_flow.handle_children_floats_if_necessary(ctx, inorder,
left_offset, top_offset);
let mut cur_y = top_offset;
for kid in self.block_flow.base.child_iter() {
let child_node = flow::mut_base(kid);
child_node.position.origin.y = cur_y;
cur_y = cur_y + child_node.position.size.height;
}
let height = cur_y - top_offset;
let mut noncontent_height = Au::new(0);
for box_ in self.block_flow.box_.iter() {
let mut position = box_.border_box.get();
// noncontent_height = border_top/bottom + padding_top/bottom of box
noncontent_height = box_.noncontent_height();
position.origin.y = Au(0);
position.size.height = height + noncontent_height;
box_.border_box.set(position);
}
self.block_flow.base.position.size.height = height + noncontent_height;
self.block_flow.set_floats_out_if_inorder(inorder, height, cur_y,
top_offset, bottom_offset, left_offset);
}
pub fn build_display_list_table<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableFlow {
fn class(&self) -> FlowClass {
TableFlowClass
}
fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
/// This function finds the specified column widths from column group and the first row.
/// Those are used in fixed table layout calculation.
/* FIXME: automatic table layout calculation */
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
let mut did_first_row = false;
/* find max width from child block contexts */
for kid in self.block_flow.base.child_iter() {
assert!(kid.is_proper_table_child());
if kid.is_table_colgroup() {
self.col_widths.push_all(kid.as_table_colgroup().widths);
} else if kid.is_table_rowgroup() || kid.is_table_row() {
// read column widths from table-row-group/table-row, and assign
// width=0 for the columns not defined in column-group
// FIXME: need to read widths from either table-header-group OR
// first table-row
let kid_col_widths = if kid.is_table_rowgroup() {
&kid.as_table_rowgroup().col_widths
} else {
&kid.as_table_row().col_widths
};
match self.table_layout {
FixedLayout if !did_first_row => {
did_first_row = true;
let mut child_widths = kid_col_widths.iter();
for col_width in self.col_widths.mut_iter() {
match child_widths.next() {
Some(child_width) => {
if *col_width == Au::new(0) {
*col_width = *child_width;
}
},
None => break
}
}
},
_ => {}
}
let num_child_cols = kid_col_widths.len();
let num_cols = self.col_widths.len();
debug!("colgroup has {} column(s) and child has {} column(s)", num_cols, num_child_cols);
for i in range(num_cols, num_child_cols) {
self.col_widths.push(kid_col_widths[i]);
}
}
}
self.block_flow.bubble_widths(ctx);
}
/// Recursively (top-down) determines the actual width of child contexts and boxes. When called
/// on this context, the context has had its width set by the parent context.
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table");
// The position was set to the containing block by the flow's parent.
let containing_block_width = self.block_flow.base.position.size.width;
let mut left_content_edge = Au::new(0);
let mut content_width = containing_block_width;
let mut num_unspecified_widths = 0;
let mut total_column_width = Au::new(0);
for col_width in self.col_widths.iter() {
if *col_width == Au::new(0) {
num_unspecified_widths += 1;
} else {
total_column_width = total_column_width.add(col_width);
}
}
let width_computer = InternalTable;
width_computer.compute_used_width(&mut self.block_flow, ctx, containing_block_width);
for box_ in self.block_flow.box_.iter() {
left_content_edge = box_.padding.get().left + box_.border.get().left;
let padding_and_borders = box_.padding.get().left + box_.padding.get().right +
box_.border.get().left + box_.border.get().right;
content_width = box_.border_box.get().size.width - padding_and_borders;
}
// In fixed table layout, if every column width is specified we scale all the
// columns to fill the content width; otherwise we distribute the extra space
// evenly among the unspecified columns.
if (total_column_width < content_width) && (num_unspecified_widths == 0) {
let ratio = content_width.to_f64().unwrap() / total_column_width.to_f64().unwrap();
for col_width in self.col_widths.mut_iter() {
*col_width = (*col_width).scale_by(ratio);
}
} else if num_unspecified_widths != 0 {
let extra_column_width = (content_width - total_column_width) / Au::new(num_unspecified_widths);
for col_width in self.col_widths.mut_iter() {
if *col_width == Au(0) {
*col_width = extra_column_width;
}
}
}
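// Worked example (added comment): with content_width = 600 and column widths
// [100, 200, 0, 0], total_column_width = 300 and num_unspecified_widths = 2,
// so each zero-width column receives (600 - 300) / 2 = 150, giving
// [100, 200, 150, 150]. With no zero widths the first branch instead scales
// every column by 600/300 = 2.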
self.block_flow.propagate_assigned_width_to_children(left_content_edge, content_width, Some(self.col_widths.clone()));
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table");
self.assign_height_table_base(ctx, true);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table");
self.assign_height_table_base(ctx, false);
}
// CSS Section 8.3.1 - Collapsing Margins
// Since `margin` is not used on table box, `collapsing` and `collapsible` are set to 0
fn collapse_margins(&mut self,
_: bool,
_: &mut bool,
_: &mut Au,
_: &mut Au,
collapsing: &mut Au,
collapsible: &mut Au) {
// `margin` is not used on table box.
*collapsing = Au::new(0);
*collapsible = Au::new(0);
}
fn debug_str(&self) -> ~str {
let txt = ~"TableFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
/// Table, TableRowGroup, TableRow, TableCell types.
/// Their widths are calculated in the same way and do not have margins.
pub struct InternalTable;
impl WidthAndMarginsComputer for InternalTable {
/// Compute the used value of width, taking care of min-width and max-width.
///
/// CSS Section 10.4: Minimum and Maximum widths
fn compute_used_width(&self,
block: &mut BlockFlow,
ctx: &mut LayoutContext,
parent_flow_width: Au) {
let input = self.compute_width_constraint_inputs(block, parent_flow_width, ctx);
let solution = self.solve_width_constraints(block, input);
self.set_width_constraint_solutions(block, solution);
}
/// Solve the width and margins constraints for this block flow.
fn solve_width_constraints(&self,
_: &mut BlockFlow,
input: WidthConstraintInput)
-> WidthConstraintSolution {
WidthConstraintSolution::new(input.available_width, Au::new(0), Au::new(0))
}
}
|
AutoLayout
};
|
conditional_block
|
table.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::box_::Box;
use layout::block::BlockFlow;
use layout::block::{WidthAndMarginsComputer, WidthConstraintInput, WidthConstraintSolution};
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::floats::{FloatKind};
use layout::flow::{TableFlowClass, FlowClass, Flow, ImmutableFlowUtils};
use layout::flow;
use layout::table_wrapper::{TableLayout, FixedLayout, AutoLayout};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use style::computed_values::table_layout;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table flow corresponds to the table's internal table box under a table wrapper flow.
/// The properties `position`, `float`, and `margin-*` are used on the table wrapper box,
/// not on the table box, per CSS 2.1 § 10.5.
pub struct TableFlow {
block_flow: BlockFlow,
/// Column widths
col_widths: ~[Au],
/// Table-layout property
table_layout: TableLayout,
}
impl TableFlow {
pub fn from_node_and_box(node: &ThreadSafeLayoutNode,
box_: Box)
-> TableFlow {
let mut block_flow = BlockFlow::from_node_and_box(node, box_);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableFlow {
let mut block_flow = BlockFlow::from_node(constructor, node);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn float_from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode,
float_kind: FloatKind)
-> TableFlow {
|
pub fn teardown(&mut self) {
self.block_flow.teardown();
self.col_widths = ~[];
}
/// Assign height for table flow.
///
/// inline(always) because this is only ever called by in-order or non-in-order top-level
/// methods
#[inline(always)]
fn assign_height_table_base(&mut self, ctx: &mut LayoutContext, inorder: bool) {
let (_, top_offset, bottom_offset, left_offset) = self.block_flow.initialize_offsets(true);
self.block_flow.handle_children_floats_if_necessary(ctx, inorder,
left_offset, top_offset);
let mut cur_y = top_offset;
for kid in self.block_flow.base.child_iter() {
let child_node = flow::mut_base(kid);
child_node.position.origin.y = cur_y;
cur_y = cur_y + child_node.position.size.height;
}
let height = cur_y - top_offset;
let mut noncontent_height = Au::new(0);
for box_ in self.block_flow.box_.iter() {
let mut position = box_.border_box.get();
// noncontent_height = border_top/bottom + padding_top/bottom of box
noncontent_height = box_.noncontent_height();
position.origin.y = Au(0);
position.size.height = height + noncontent_height;
box_.border_box.set(position);
}
self.block_flow.base.position.size.height = height + noncontent_height;
self.block_flow.set_floats_out_if_inorder(inorder, height, cur_y,
top_offset, bottom_offset, left_offset);
}
pub fn build_display_list_table<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableFlow {
fn class(&self) -> FlowClass {
TableFlowClass
}
fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
/// This function finds the specified column widths from column group and the first row.
/// Those are used in fixed table layout calculation.
/* FIXME: automatic table layout calculation */
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
let mut did_first_row = false;
/* find max width from child block contexts */
for kid in self.block_flow.base.child_iter() {
assert!(kid.is_proper_table_child());
if kid.is_table_colgroup() {
self.col_widths.push_all(kid.as_table_colgroup().widths);
} else if kid.is_table_rowgroup() || kid.is_table_row() {
// read column widths from table-row-group/table-row, and assign
// width=0 for the columns not defined in column-group
// FIXME: need to read widths from either table-header-group OR
// first table-row
let kid_col_widths = if kid.is_table_rowgroup() {
&kid.as_table_rowgroup().col_widths
} else {
&kid.as_table_row().col_widths
};
match self.table_layout {
                FixedLayout if !did_first_row => {
did_first_row = true;
let mut child_widths = kid_col_widths.iter();
for col_width in self.col_widths.mut_iter() {
match child_widths.next() {
Some(child_width) => {
if *col_width == Au::new(0) {
*col_width = *child_width;
}
},
None => break
}
}
},
_ => {}
}
let num_child_cols = kid_col_widths.len();
let num_cols = self.col_widths.len();
debug!("colgroup has {} column(s) and child has {} column(s)", num_cols, num_child_cols);
for i in range(num_cols, num_child_cols) {
self.col_widths.push( kid_col_widths[i] );
}
}
}
self.block_flow.bubble_widths(ctx);
}
/// Recursively (top-down) determines the actual width of child contexts and boxes. When called
/// on this context, the context has had its width set by the parent context.
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table");
// The position was set to the containing block by the flow's parent.
let containing_block_width = self.block_flow.base.position.size.width;
let mut left_content_edge = Au::new(0);
let mut content_width = containing_block_width;
let mut num_unspecified_widths = 0;
let mut total_column_width = Au::new(0);
for col_width in self.col_widths.iter() {
if *col_width == Au::new(0) {
num_unspecified_widths += 1;
} else {
total_column_width = total_column_width.add(col_width);
}
}
let width_computer = InternalTable;
width_computer.compute_used_width(&mut self.block_flow, ctx, containing_block_width);
for box_ in self.block_flow.box_.iter() {
left_content_edge = box_.padding.get().left + box_.border.get().left;
let padding_and_borders = box_.padding.get().left + box_.padding.get().right +
box_.border.get().left + box_.border.get().right;
content_width = box_.border_box.get().size.width - padding_and_borders;
}
// In fixed table layout, we distribute extra space among the unspecified columns if there are
// any, or among all the columns if all are specified.
if (total_column_width < content_width) && (num_unspecified_widths == 0) {
let ratio = content_width.to_f64().unwrap() / total_column_width.to_f64().unwrap();
for col_width in self.col_widths.mut_iter() {
*col_width = (*col_width).scale_by(ratio);
}
        } else if num_unspecified_widths != 0 {
let extra_column_width = (content_width - total_column_width) / Au::new(num_unspecified_widths);
for col_width in self.col_widths.mut_iter() {
if *col_width == Au(0) {
*col_width = extra_column_width;
}
}
}
self.block_flow.propagate_assigned_width_to_children(left_content_edge, content_width, Some(self.col_widths.clone()));
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table");
self.assign_height_table_base(ctx, true);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table");
self.assign_height_table_base(ctx, false);
}
// CSS Section 8.3.1 - Collapsing Margins
// Since `margin` is not used on table box, `collapsing` and `collapsible` are set to 0
fn collapse_margins(&mut self,
_: bool,
_: &mut bool,
_: &mut Au,
_: &mut Au,
collapsing: &mut Au,
collapsible: &mut Au) {
// `margin` is not used on table box.
*collapsing = Au::new(0);
*collapsible = Au::new(0);
}
fn debug_str(&self) -> ~str {
let txt = ~"TableFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
/// Table, TableRowGroup, TableRow, TableCell types.
/// Their widths are calculated in the same way and do not have margins.
pub struct InternalTable;
impl WidthAndMarginsComputer for InternalTable {
/// Compute the used value of width, taking care of min-width and max-width.
///
/// CSS Section 10.4: Minimum and Maximum widths
fn compute_used_width(&self,
block: &mut BlockFlow,
ctx: &mut LayoutContext,
parent_flow_width: Au) {
let input = self.compute_width_constraint_inputs(block, parent_flow_width, ctx);
let solution = self.solve_width_constraints(block, input);
self.set_width_constraint_solutions(block, solution);
}
/// Solve the width and margins constraints for this block flow.
fn solve_width_constraints(&self,
_: &mut BlockFlow,
input: WidthConstraintInput)
-> WidthConstraintSolution {
WidthConstraintSolution::new(input.available_width, Au::new(0), Au::new(0))
}
}
|
let mut block_flow = BlockFlow::float_from_node(constructor, node, float_kind);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
|
identifier_body
|
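The bubble_widths pass in the table.rs rows above merges the column widths declared by a column group with those read from the first row: a colgroup width wins, zeros are filled in from the row, and extra row columns are appended. A minimal standalone sketch of that merge rule, assuming plain i32 widths in place of Au (the merge_col_widths name is illustrative, not part of the crate):

```rust
// Sketch of the column-width merge in TableFlow::bubble_widths:
// colgroup widths take priority; zero (unspecified) entries are filled
// from the first row; columns the row adds beyond the colgroup are appended.
fn merge_col_widths(colgroup: &[i32], first_row: &[i32]) -> Vec<i32> {
    let mut widths: Vec<i32> = colgroup.to_vec();
    for (w, row_w) in widths.iter_mut().zip(first_row) {
        if *w == 0 {
            *w = *row_w; // fill an unspecified colgroup width from the row
        }
    }
    if first_row.len() > widths.len() {
        // the row defines more columns than the colgroup did
        widths.extend_from_slice(&first_row[widths.len()..]);
    }
    widths
}

fn main() {
    // colgroup fixes column 0; column 1 comes from the row; column 2 is new
    assert_eq!(merge_col_widths(&[100, 0], &[80, 90, 70]), vec![100, 90, 70]);
}
```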
table.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::box_::Box;
use layout::block::BlockFlow;
use layout::block::{WidthAndMarginsComputer, WidthConstraintInput, WidthConstraintSolution};
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::floats::{FloatKind};
use layout::flow::{TableFlowClass, FlowClass, Flow, ImmutableFlowUtils};
use layout::flow;
use layout::table_wrapper::{TableLayout, FixedLayout, AutoLayout};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use style::computed_values::table_layout;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table flow corresponds to the table's internal table box under a table wrapper flow.
/// The properties `position`, `float`, and `margin-*` are used on the table wrapper box,
/// not the table box, per CSS 2.1 § 10.5.
pub struct TableFlow {
block_flow: BlockFlow,
/// Column widths
col_widths: ~[Au],
/// Table-layout property
table_layout: TableLayout,
}
impl TableFlow {
pub fn from_node_and_box(node: &ThreadSafeLayoutNode,
box_: Box)
-> TableFlow {
let mut block_flow = BlockFlow::from_node_and_box(node, box_);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableFlow {
let mut block_flow = BlockFlow::from_node(constructor, node);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn f
|
constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode,
float_kind: FloatKind)
-> TableFlow {
let mut block_flow = BlockFlow::float_from_node(constructor, node, float_kind);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
self.col_widths = ~[];
}
/// Assign height for table flow.
///
/// inline(always) because this is only ever called by in-order or non-in-order top-level
/// methods
#[inline(always)]
fn assign_height_table_base(&mut self, ctx: &mut LayoutContext, inorder: bool) {
let (_, top_offset, bottom_offset, left_offset) = self.block_flow.initialize_offsets(true);
self.block_flow.handle_children_floats_if_necessary(ctx, inorder,
left_offset, top_offset);
let mut cur_y = top_offset;
for kid in self.block_flow.base.child_iter() {
let child_node = flow::mut_base(kid);
child_node.position.origin.y = cur_y;
cur_y = cur_y + child_node.position.size.height;
}
let height = cur_y - top_offset;
let mut noncontent_height = Au::new(0);
for box_ in self.block_flow.box_.iter() {
let mut position = box_.border_box.get();
// noncontent_height = border_top/bottom + padding_top/bottom of box
noncontent_height = box_.noncontent_height();
position.origin.y = Au(0);
position.size.height = height + noncontent_height;
box_.border_box.set(position);
}
self.block_flow.base.position.size.height = height + noncontent_height;
self.block_flow.set_floats_out_if_inorder(inorder, height, cur_y,
top_offset, bottom_offset, left_offset);
}
pub fn build_display_list_table<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableFlow {
fn class(&self) -> FlowClass {
TableFlowClass
}
fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
/// This function finds the specified column widths from column group and the first row.
/// Those are used in fixed table layout calculation.
/* FIXME: automatic table layout calculation */
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
let mut did_first_row = false;
/* find max width from child block contexts */
for kid in self.block_flow.base.child_iter() {
assert!(kid.is_proper_table_child());
if kid.is_table_colgroup() {
self.col_widths.push_all(kid.as_table_colgroup().widths);
} else if kid.is_table_rowgroup() || kid.is_table_row() {
// read column widths from table-row-group/table-row, and assign
// width=0 for the columns not defined in column-group
// FIXME: need to read widths from either table-header-group OR
// first table-row
let kid_col_widths = if kid.is_table_rowgroup() {
&kid.as_table_rowgroup().col_widths
} else {
&kid.as_table_row().col_widths
};
match self.table_layout {
                FixedLayout if !did_first_row => {
did_first_row = true;
let mut child_widths = kid_col_widths.iter();
for col_width in self.col_widths.mut_iter() {
match child_widths.next() {
Some(child_width) => {
if *col_width == Au::new(0) {
*col_width = *child_width;
}
},
None => break
}
}
},
_ => {}
}
let num_child_cols = kid_col_widths.len();
let num_cols = self.col_widths.len();
debug!("colgroup has {} column(s) and child has {} column(s)", num_cols, num_child_cols);
for i in range(num_cols, num_child_cols) {
self.col_widths.push( kid_col_widths[i] );
}
}
}
self.block_flow.bubble_widths(ctx);
}
/// Recursively (top-down) determines the actual width of child contexts and boxes. When called
/// on this context, the context has had its width set by the parent context.
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table");
// The position was set to the containing block by the flow's parent.
let containing_block_width = self.block_flow.base.position.size.width;
let mut left_content_edge = Au::new(0);
let mut content_width = containing_block_width;
let mut num_unspecified_widths = 0;
let mut total_column_width = Au::new(0);
for col_width in self.col_widths.iter() {
if *col_width == Au::new(0) {
num_unspecified_widths += 1;
} else {
total_column_width = total_column_width.add(col_width);
}
}
let width_computer = InternalTable;
width_computer.compute_used_width(&mut self.block_flow, ctx, containing_block_width);
for box_ in self.block_flow.box_.iter() {
left_content_edge = box_.padding.get().left + box_.border.get().left;
let padding_and_borders = box_.padding.get().left + box_.padding.get().right +
box_.border.get().left + box_.border.get().right;
content_width = box_.border_box.get().size.width - padding_and_borders;
}
// In fixed table layout, we distribute extra space among the unspecified columns if there are
// any, or among all the columns if all are specified.
if (total_column_width < content_width) && (num_unspecified_widths == 0) {
let ratio = content_width.to_f64().unwrap() / total_column_width.to_f64().unwrap();
for col_width in self.col_widths.mut_iter() {
*col_width = (*col_width).scale_by(ratio);
}
        } else if num_unspecified_widths != 0 {
let extra_column_width = (content_width - total_column_width) / Au::new(num_unspecified_widths);
for col_width in self.col_widths.mut_iter() {
if *col_width == Au(0) {
*col_width = extra_column_width;
}
}
}
self.block_flow.propagate_assigned_width_to_children(left_content_edge, content_width, Some(self.col_widths.clone()));
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table");
self.assign_height_table_base(ctx, true);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table");
self.assign_height_table_base(ctx, false);
}
// CSS Section 8.3.1 - Collapsing Margins
// Since `margin` is not used on table box, `collapsing` and `collapsible` are set to 0
fn collapse_margins(&mut self,
_: bool,
_: &mut bool,
_: &mut Au,
_: &mut Au,
collapsing: &mut Au,
collapsible: &mut Au) {
// `margin` is not used on table box.
*collapsing = Au::new(0);
*collapsible = Au::new(0);
}
fn debug_str(&self) -> ~str {
let txt = ~"TableFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
/// Table, TableRowGroup, TableRow, TableCell types.
/// Their widths are calculated in the same way and do not have margins.
pub struct InternalTable;
impl WidthAndMarginsComputer for InternalTable {
/// Compute the used value of width, taking care of min-width and max-width.
///
/// CSS Section 10.4: Minimum and Maximum widths
fn compute_used_width(&self,
block: &mut BlockFlow,
ctx: &mut LayoutContext,
parent_flow_width: Au) {
let input = self.compute_width_constraint_inputs(block, parent_flow_width, ctx);
let solution = self.solve_width_constraints(block, input);
self.set_width_constraint_solutions(block, solution);
}
/// Solve the width and margins constraints for this block flow.
fn solve_width_constraints(&self,
_: &mut BlockFlow,
input: WidthConstraintInput)
-> WidthConstraintSolution {
WidthConstraintSolution::new(input.available_width, Au::new(0), Au::new(0))
}
}
|
loat_from_node(
|
identifier_name
|
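assign_widths in the rows above distributes leftover content width in fixed layout: when every column already has a specified width, all of them are scaled proportionally to fill the content box; otherwise the slack is split evenly among the zero-width (unspecified) columns. A minimal sketch of that rule, assuming f64 widths in place of Au (distribute_column_widths is a hypothetical helper):

```rust
// Sketch of the width-distribution step in TableFlow::assign_widths.
fn distribute_column_widths(col_widths: &mut [f64], content_width: f64) {
    let total: f64 = col_widths.iter().filter(|w| **w != 0.0).sum();
    let unspecified = col_widths.iter().filter(|w| **w == 0.0).count();
    if unspecified == 0 {
        // every width is specified: scale them all to fill the content box
        if total > 0.0 && total < content_width {
            let ratio = content_width / total;
            for w in col_widths.iter_mut() {
                *w *= ratio;
            }
        }
    } else {
        // split the leftover width evenly among the unspecified columns
        let extra = (content_width - total) / unspecified as f64;
        for w in col_widths.iter_mut() {
            if *w == 0.0 {
                *w = extra;
            }
        }
    }
}

fn main() {
    let mut widths = [200.0, 0.0, 100.0];
    distribute_column_widths(&mut widths, 600.0);
    assert_eq!(widths, [200.0, 300.0, 100.0]); // the 300 of slack goes to column 1
}
```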
table.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::box_::Box;
use layout::block::BlockFlow;
use layout::block::{WidthAndMarginsComputer, WidthConstraintInput, WidthConstraintSolution};
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::floats::{FloatKind};
use layout::flow::{TableFlowClass, FlowClass, Flow, ImmutableFlowUtils};
use layout::flow;
use layout::table_wrapper::{TableLayout, FixedLayout, AutoLayout};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use style::computed_values::table_layout;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table flow corresponds to the table's internal table box under a table wrapper flow.
/// The properties `position`, `float`, and `margin-*` are used on the table wrapper box,
/// not the table box, per CSS 2.1 § 10.5.
pub struct TableFlow {
block_flow: BlockFlow,
|
/// Column widths
col_widths: ~[Au],
/// Table-layout property
table_layout: TableLayout,
}
impl TableFlow {
pub fn from_node_and_box(node: &ThreadSafeLayoutNode,
box_: Box)
-> TableFlow {
let mut block_flow = BlockFlow::from_node_and_box(node, box_);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableFlow {
let mut block_flow = BlockFlow::from_node(constructor, node);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn float_from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode,
float_kind: FloatKind)
-> TableFlow {
let mut block_flow = BlockFlow::float_from_node(constructor, node, float_kind);
let table_layout = if block_flow.box_().style().Table.get().table_layout ==
table_layout::fixed {
FixedLayout
} else {
AutoLayout
};
TableFlow {
block_flow: block_flow,
col_widths: ~[],
table_layout: table_layout
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
self.col_widths = ~[];
}
/// Assign height for table flow.
///
/// inline(always) because this is only ever called by in-order or non-in-order top-level
/// methods
#[inline(always)]
fn assign_height_table_base(&mut self, ctx: &mut LayoutContext, inorder: bool) {
let (_, top_offset, bottom_offset, left_offset) = self.block_flow.initialize_offsets(true);
self.block_flow.handle_children_floats_if_necessary(ctx, inorder,
left_offset, top_offset);
let mut cur_y = top_offset;
for kid in self.block_flow.base.child_iter() {
let child_node = flow::mut_base(kid);
child_node.position.origin.y = cur_y;
cur_y = cur_y + child_node.position.size.height;
}
let height = cur_y - top_offset;
let mut noncontent_height = Au::new(0);
for box_ in self.block_flow.box_.iter() {
let mut position = box_.border_box.get();
// noncontent_height = border_top/bottom + padding_top/bottom of box
noncontent_height = box_.noncontent_height();
position.origin.y = Au(0);
position.size.height = height + noncontent_height;
box_.border_box.set(position);
}
self.block_flow.base.position.size.height = height + noncontent_height;
self.block_flow.set_floats_out_if_inorder(inorder, height, cur_y,
top_offset, bottom_offset, left_offset);
}
pub fn build_display_list_table<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableFlow {
fn class(&self) -> FlowClass {
TableFlowClass
}
fn as_table<'a>(&'a mut self) -> &'a mut TableFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
/// This function finds the specified column widths from column group and the first row.
/// Those are used in fixed table layout calculation.
/* FIXME: automatic table layout calculation */
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
let mut did_first_row = false;
/* find max width from child block contexts */
for kid in self.block_flow.base.child_iter() {
assert!(kid.is_proper_table_child());
if kid.is_table_colgroup() {
self.col_widths.push_all(kid.as_table_colgroup().widths);
} else if kid.is_table_rowgroup() || kid.is_table_row() {
// read column widths from table-row-group/table-row, and assign
// width=0 for the columns not defined in column-group
// FIXME: need to read widths from either table-header-group OR
// first table-row
let kid_col_widths = if kid.is_table_rowgroup() {
&kid.as_table_rowgroup().col_widths
} else {
&kid.as_table_row().col_widths
};
match self.table_layout {
                FixedLayout if !did_first_row => {
did_first_row = true;
let mut child_widths = kid_col_widths.iter();
for col_width in self.col_widths.mut_iter() {
match child_widths.next() {
Some(child_width) => {
if *col_width == Au::new(0) {
*col_width = *child_width;
}
},
None => break
}
}
},
_ => {}
}
let num_child_cols = kid_col_widths.len();
let num_cols = self.col_widths.len();
debug!("colgroup has {} column(s) and child has {} column(s)", num_cols, num_child_cols);
for i in range(num_cols, num_child_cols) {
self.col_widths.push( kid_col_widths[i] );
}
}
}
self.block_flow.bubble_widths(ctx);
}
/// Recursively (top-down) determines the actual width of child contexts and boxes. When called
/// on this context, the context has had its width set by the parent context.
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table");
// The position was set to the containing block by the flow's parent.
let containing_block_width = self.block_flow.base.position.size.width;
let mut left_content_edge = Au::new(0);
let mut content_width = containing_block_width;
let mut num_unspecified_widths = 0;
let mut total_column_width = Au::new(0);
for col_width in self.col_widths.iter() {
if *col_width == Au::new(0) {
num_unspecified_widths += 1;
} else {
total_column_width = total_column_width.add(col_width);
}
}
let width_computer = InternalTable;
width_computer.compute_used_width(&mut self.block_flow, ctx, containing_block_width);
for box_ in self.block_flow.box_.iter() {
left_content_edge = box_.padding.get().left + box_.border.get().left;
let padding_and_borders = box_.padding.get().left + box_.padding.get().right +
box_.border.get().left + box_.border.get().right;
content_width = box_.border_box.get().size.width - padding_and_borders;
}
// In fixed table layout, we distribute extra space among the unspecified columns if there are
// any, or among all the columns if all are specified.
if (total_column_width < content_width) && (num_unspecified_widths == 0) {
let ratio = content_width.to_f64().unwrap() / total_column_width.to_f64().unwrap();
for col_width in self.col_widths.mut_iter() {
*col_width = (*col_width).scale_by(ratio);
}
        } else if num_unspecified_widths != 0 {
let extra_column_width = (content_width - total_column_width) / Au::new(num_unspecified_widths);
for col_width in self.col_widths.mut_iter() {
if *col_width == Au(0) {
*col_width = extra_column_width;
}
}
}
self.block_flow.propagate_assigned_width_to_children(left_content_edge, content_width, Some(self.col_widths.clone()));
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table");
self.assign_height_table_base(ctx, true);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table");
self.assign_height_table_base(ctx, false);
}
// CSS Section 8.3.1 - Collapsing Margins
// Since `margin` is not used on table box, `collapsing` and `collapsible` are set to 0
fn collapse_margins(&mut self,
_: bool,
_: &mut bool,
_: &mut Au,
_: &mut Au,
collapsing: &mut Au,
collapsible: &mut Au) {
// `margin` is not used on table box.
*collapsing = Au::new(0);
*collapsible = Au::new(0);
}
fn debug_str(&self) -> ~str {
let txt = ~"TableFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
/// Table, TableRowGroup, TableRow, TableCell types.
/// Their widths are calculated in the same way and do not have margins.
pub struct InternalTable;
impl WidthAndMarginsComputer for InternalTable {
/// Compute the used value of width, taking care of min-width and max-width.
///
/// CSS Section 10.4: Minimum and Maximum widths
fn compute_used_width(&self,
block: &mut BlockFlow,
ctx: &mut LayoutContext,
parent_flow_width: Au) {
let input = self.compute_width_constraint_inputs(block, parent_flow_width, ctx);
let solution = self.solve_width_constraints(block, input);
self.set_width_constraint_solutions(block, solution);
}
/// Solve the width and margins constraints for this block flow.
fn solve_width_constraints(&self,
_: &mut BlockFlow,
input: WidthConstraintInput)
-> WidthConstraintSolution {
WidthConstraintSolution::new(input.available_width, Au::new(0), Au::new(0))
}
}
|
random_line_split
|
|
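assign_height_table_base in the table.rs rows above places each child at the running y offset and takes the total as the table's content height. A reduced sketch of just that stacking step, with i32 app units standing in for Au (stack_children is an illustrative name):

```rust
// Sketch of the child-stacking pass in assign_height_table_base:
// returns each child's y origin plus the resulting content height.
fn stack_children(child_heights: &[i32], top_offset: i32) -> (Vec<i32>, i32) {
    let mut origins = Vec::with_capacity(child_heights.len());
    let mut cur_y = top_offset;
    for h in child_heights {
        origins.push(cur_y);
        cur_y += *h;
    }
    (origins, cur_y - top_offset)
}

fn main() {
    let (origins, height) = stack_children(&[60, 120, 60], 10);
    assert_eq!(origins, vec![10, 70, 190]);
    assert_eq!(height, 240); // border/padding would be added on top of this
}
```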
instance.rs
|
use std::io::{self, BufRead, BufReader, ErrorKind, Result};
use std::net::TcpStream;
use std::process::{Child, Command, Stdio};
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::Duration;
use avro_rs::{from_value, Reader, Schema, Writer};
use super::config::InstanceConfig;
use super::request::Request;
use super::response::Response;
fn initial_connect(child: &mut Child) -> Result<TcpStream> {
let child_stdout = child
.stdout
.as_mut()
.expect("Failed to retrieve child stdout");
let mut reader = BufReader::new(child_stdout);
let mut listen_line = String::new();
let length = reader.read_line(&mut listen_line)?;
if length == 0 {
return Err(io::Error::new(
ErrorKind::BrokenPipe,
"read zero length string from child stdout",
));
}
    if !listen_line.starts_with("LISTENING ") {
return Err(io::Error::new(
ErrorKind::InvalidData,
"expected LISTENING line from child stdout",
));
}
    // `read_line` keeps the trailing newline; trim it so the address
    // parses cleanly in `TcpStream::connect`.
    let address = listen_line.trim_end().split(' ').nth(1).ok_or(io::Error::new(
        ErrorKind::InvalidData,
        "received invalid LISTENING line",
    ))?;
TcpStream::connect(address)
}
#[derive(Debug)]
pub struct Instance {
config: InstanceConfig,
child: Child,
handle: JoinHandle<()>,
sender: Sender<Arc<InstanceMessage>>,
}
#[derive(Debug)]
struct
|
{
request: Request,
response: Option<Response>,
}
#[derive(Debug)]
struct InstanceContext {
stream: TcpStream,
receiver: Receiver<Arc<InstanceMessage>>,
}
impl Instance {
pub fn new(config: InstanceConfig) -> Result<Instance> {
let mut child = Command::new(&config.command)
.stdout(Stdio::piped())
.spawn()?;
let stream = initial_connect(&mut child)?;
let (sender, receiver) = channel();
let mut context = InstanceContext { stream, receiver };
let handle = thread::spawn(move || {
context.run();
});
Ok(Instance {
config,
child,
handle,
sender,
})
}
pub fn process_request(&mut self, request: Request) -> Response {
let message = Arc::new(InstanceMessage {
request,
response: None,
});
self.sender
.send(message.clone())
.expect("Failed to send message");
// TODO: wait for command to finish or time out
thread::sleep(Duration::from_millis(1000 * 5));
let message = Arc::try_unwrap(message).expect("Existing references to Arc");
message.response.expect("Failed to process request")
}
}
impl InstanceContext {
fn run(&mut self) {
// parse the plugin message schemas
let request_schema = Schema::parse_str(include_str!("request.avsc"))
.expect("Failed to parse request message schema");
let response_schema = Schema::parse_str(include_str!("response.avsc"))
.expect("Failed to parse response message schema");
// create request writer and response reader
let mut writer = Writer::new(&request_schema, &self.stream);
let mut reader = Reader::with_schema(&response_schema, &self.stream)
.expect("Failed to create Avro response reader");
// process requests
loop {
let mut message = match self.receiver.try_recv() {
Ok(message) => message,
Err(TryRecvError::Empty) => {
// rate limit
thread::sleep(Duration::from_millis(100));
// try again
continue;
}
Err(TryRecvError::Disconnected) => {
// time to quit
break;
}
};
// XXX: not sure if this is the best way
let message =
Arc::get_mut(&mut message).expect("Failed to get mutable message reference");
// send the request
writer
.append_ser(&message.request)
.expect("Failed to serialize request");
writer.flush().expect("Failed to flush writer");
// receive the response
let value = reader
.next()
.expect("Failed to read response")
.expect("Failed to unserialize response");
message.response =
Some(from_value::<Response>(&value).expect("Failed to unserialize response"));
// TODO: notify whoever is waiting the command is complete
}
}
}
|
InstanceMessage
|
identifier_name
|
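initial_connect above expects the child process to print a single `LISTENING <address>` line on stdout and then accept the host's TCP connection. A minimal sketch of the child's side of that handshake, assuming only what the parser requires (run_plugin_child is an illustrative name, not part of the crate):

```rust
// Child-side counterpart to initial_connect: bind, announce, accept.
use std::io::Result;
use std::net::TcpListener;

fn run_plugin_child() -> Result<()> {
    // port 0 lets the OS pick a free port
    let listener = TcpListener::bind("127.0.0.1:0")?;
    // the host reads exactly this line from our stdout to find us
    println!("LISTENING {}", listener.local_addr()?);
    let (_stream, _peer) = listener.accept()?;
    // ... serve Avro-encoded requests and responses over `_stream` ...
    Ok(())
}

fn main() -> Result<()> {
    run_plugin_child() // blocks until the host connects
}
```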
instance.rs
|
use std::io::{self, BufRead, BufReader, ErrorKind, Result};
use std::net::TcpStream;
use std::process::{Child, Command, Stdio};
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
|
use super::config::InstanceConfig;
use super::request::Request;
use super::response::Response;
fn initial_connect(child: &mut Child) -> Result<TcpStream> {
let child_stdout = child
.stdout
.as_mut()
.expect("Failed to retrieve child stdout");
let mut reader = BufReader::new(child_stdout);
let mut listen_line = String::new();
let length = reader.read_line(&mut listen_line)?;
if length == 0 {
return Err(io::Error::new(
ErrorKind::BrokenPipe,
"read zero length string from child stdout",
));
}
    if !listen_line.starts_with("LISTENING ") {
return Err(io::Error::new(
ErrorKind::InvalidData,
"expected LISTENING line from child stdout",
));
}
    // `read_line` keeps the trailing newline; trim it so the address
    // parses cleanly in `TcpStream::connect`.
    let address = listen_line.trim_end().split(' ').nth(1).ok_or(io::Error::new(
        ErrorKind::InvalidData,
        "received invalid LISTENING line",
    ))?;
TcpStream::connect(address)
}
#[derive(Debug)]
pub struct Instance {
config: InstanceConfig,
child: Child,
handle: JoinHandle<()>,
sender: Sender<Arc<InstanceMessage>>,
}
#[derive(Debug)]
struct InstanceMessage {
request: Request,
response: Option<Response>,
}
#[derive(Debug)]
struct InstanceContext {
stream: TcpStream,
receiver: Receiver<Arc<InstanceMessage>>,
}
impl Instance {
pub fn new(config: InstanceConfig) -> Result<Instance> {
let mut child = Command::new(&config.command)
.stdout(Stdio::piped())
.spawn()?;
let stream = initial_connect(&mut child)?;
let (sender, receiver) = channel();
let mut context = InstanceContext { stream, receiver };
let handle = thread::spawn(move || {
context.run();
});
Ok(Instance {
config,
child,
handle,
sender,
})
}
pub fn process_request(&mut self, request: Request) -> Response {
let message = Arc::new(InstanceMessage {
request,
response: None,
});
self.sender
.send(message.clone())
.expect("Failed to send message");
// TODO: wait for command to finish or time out
thread::sleep(Duration::from_millis(1000 * 5));
let message = Arc::try_unwrap(message).expect("Existing references to Arc");
message.response.expect("Failed to process request")
}
}
impl InstanceContext {
fn run(&mut self) {
// parse the plugin message schemas
let request_schema = Schema::parse_str(include_str!("request.avsc"))
.expect("Failed to parse request message schema");
let response_schema = Schema::parse_str(include_str!("response.avsc"))
.expect("Failed to parse response message schema");
// create request writer and response reader
let mut writer = Writer::new(&request_schema, &self.stream);
let mut reader = Reader::with_schema(&response_schema, &self.stream)
.expect("Failed to create Avro response reader");
// process requests
loop {
let mut message = match self.receiver.try_recv() {
Ok(message) => message,
Err(TryRecvError::Empty) => {
// rate limit
thread::sleep(Duration::from_millis(100));
// try again
continue;
}
Err(TryRecvError::Disconnected) => {
// time to quit
break;
}
};
// XXX: not sure if this is the best way
let message =
Arc::get_mut(&mut message).expect("Failed to get mutable message reference");
// send the request
writer
.append_ser(&message.request)
.expect("Failed to serialize request");
writer.flush().expect("Failed to flush writer");
// receive the response
let value = reader
.next()
.expect("Failed to read response")
.expect("Failed to unserialize response");
message.response =
Some(from_value::<Response>(&value).expect("Failed to unserialize response"));
// TODO: notify whoever is waiting the command is complete
}
}
}
|
use std::time::Duration;
use avro_rs::{from_value, Reader, Schema, Writer};
|
random_line_split
|
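The TODOs in Instance::process_request and InstanceContext::run above (a fixed five-second sleep followed by unwrapping the Arc) could be resolved by pairing each request with its own reply channel, so the caller blocks only until the worker answers. A sketch of that pattern under simplified types (Job, process, and the String payloads stand in for the real Request/Response):

```rust
// Per-request reply channel instead of sleep-and-unwrap.
use std::sync::mpsc::{channel, Sender};
use std::time::Duration;

struct Job {
    request: String,       // placeholder for the real Request
    reply: Sender<String>, // placeholder for the real Response
}

fn process(worker: &Sender<Job>, request: String) -> String {
    let (reply, done) = channel();
    worker.send(Job { request, reply }).expect("worker thread gone");
    // block until the worker answers, with a real timeout
    done.recv_timeout(Duration::from_secs(5)).expect("request timed out")
}

fn main() {
    let (tx, rx) = channel::<Job>();
    std::thread::spawn(move || {
        for job in rx {
            // echo worker standing in for the Avro round trip
            let _ = job.reply.send(format!("echo: {}", job.request));
        }
    });
    assert_eq!(process(&tx, "ping".into()), "echo: ping");
}
```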
rc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Task-local reference-counted boxes (`Rc` type)
The `Rc` type provides shared ownership of an immutable value. Destruction is
deterministic, and will occur as soon as the last owner is gone. It is marked
as non-sendable because it avoids the overhead of atomic reference counting.
The `downgrade` method can be used to create a non-owning `Weak` pointer to the
box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will return
`None` if the value has already been freed.
For example, a tree with parent pointers can be represented by putting the
nodes behind strong `Rc` pointers, and then storing the parent pointers as
`Weak` pointers.
## Examples
Consider a scenario where a set of Gadgets are owned by a given Owner. We want
to have our Gadgets point to their Owner. We can't do this with unique
ownership, because more than one gadget may belong to the same Owner. Rc
allows us to share an Owner between multiple Gadgets, and have the Owner kept
alive as long as any Gadget points at it.
```rust
use std::rc::Rc;
struct Owner {
name: String
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner.
let gadget_owner : Rc<Owner> = Rc::new(
Owner { name: String::from_str("Gadget Man") }
);
// Create Gadgets belonging to gadget_owner. To increment the reference
// count we clone the Rc object.
let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() };
let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() };
drop(gadget_owner);
// Despite dropping gadget_owner, we're still able to print out the name of
// the Owner of the Gadgets. This is because we've only dropped the
// reference count object, not the Owner it wraps. As long as there are
// other Rc objects pointing at the same Owner, it will stay alive. Notice
// that the Rc wrapper around Gadget.owner gets automatically dereferenced
// for us.
println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
// At the end of the method, gadget1 and gadget2 get destroyed, and with
// them the last counted references to our Owner. Gadget Man now gets
// destroyed as well.
}
```
If our requirements change, and we also need to be able to traverse from
Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget
introduces a cycle between the objects. This means that their reference counts
can never reach 0, and the objects will stay alive: a memory leak. In order to
get around this, we can use `Weak` pointers. These are reference counted
pointers that don't keep an object alive if there are no normal `Rc` (or
*strong*) pointers left.
Rust actually makes it somewhat difficult to produce this loop in the first
place: in order to end up with two objects that point at each other, one of
them needs to be mutable. This is problematic because Rc enforces memory
safety by only giving out shared references to the object it wraps, and these
don't allow direct mutation. We need to wrap the part of the object we wish to
mutate in a `RefCell`, which provides *interior mutability*: a method to
achieve mutability through a shared reference. `RefCell` enforces Rust's
borrowing rules at runtime. Read the `Cell` documentation for more details on
interior mutability.
```rust
use std::rc::Rc;
use std::rc::Weak;
use std::cell::RefCell;
struct Owner {
name: String,
gadgets: RefCell<Vec<Weak<Gadget>>>
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner. Note the fact that we've put the
// Owner's vector of Gadgets inside a RefCell so that we can mutate it
// through a shared reference.
let gadget_owner : Rc<Owner> = Rc::new(
Owner {
name: "Gadget Man".to_string(),
gadgets: RefCell::new(Vec::new())
}
);
// Create Gadgets belonging to gadget_owner as before.
let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()});
let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()});
// Add the Gadgets to their Owner. To do this we mutably borrow from
// the RefCell holding the Owner's Gadgets.
gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade());
gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade());
// Iterate over our Gadgets, printing their details out
for gadget_opt in gadget_owner.gadgets.borrow().iter() {
// gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee
// that their object is still alive, we need to call upgrade() on them
// to turn them into a strong reference. This returns an Option, which
// contains a reference to our object if it still exists.
let gadget = gadget_opt.upgrade().unwrap();
println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
}
// At the end of the method, gadget_owner, gadget1 and gadget2 get
// destroyed. There are now no strong (Rc) references to the gadgets.
// Once they get destroyed, the Gadgets get destroyed. This zeroes the
// reference count on Gadget Man, so he gets destroyed as well.
}
```
*/
use core::mem::transmute;
use core::cell::Cell;
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::kinds::marker;
use core::ops::{Deref, Drop};
use core::option::{Option, Some, None};
use core::ptr;
use core::ptr::RawPtr;
use core::mem::{min_align_of, size_of};
use core::fmt;
use heap::deallocate;
struct RcBox<T> {
value: T,
strong: Cell<uint>,
weak: Cell<uint>
}
/// Immutable reference counted pointer type
#[unsafe_no_drop_flag]
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Rc<T> {
/// Construct a new reference-counted box
pub fn new(value: T) -> Rc<T> {
unsafe {
Rc {
// there is an implicit weak pointer owned by all the
// strong pointers, which ensures that the weak
// destructor never frees the allocation while the
// strong destructor is running, even if the weak
// pointer is stored inside the strong one.
_ptr: transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
}),
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
}
impl<T> Rc<T> {
/// Downgrade the reference-counted pointer to a weak reference
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak {
_ptr: self._ptr,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
impl<T: Clone> Rc<T> {
/// Acquires a mutable pointer to the inner contents by guaranteeing that
/// the reference count is one (no sharing is possible).
///
/// This is also referred to as a copy-on-write operation because the inner
/// data is cloned if the reference count is greater than one.
#[inline]
#[experimental]
pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
// Note that we hold a strong reference, which also counts as
// a weak reference, so we only clone if there is an
// additional reference of either kind.
        if self.strong() != 1 || self.weak() != 1 {
*self = Rc::new(self.deref().clone())
}
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Rc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
let inner = unsafe { &mut *self._ptr };
&mut inner.value
}
}
impl<T> Deref<T> for Rc<T> {
/// Borrow the value contained in the reference-counted box
#[inline(always)]
fn deref<'a>(&'a self) -> &'a T {
&self.inner().value
}
}
#[unsafe_destructor]
impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(self.deref()); // destroy the contained object
// remove the implicit "strong weak" pointer now
// that we've destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
}
#[unstable]
impl<T> Clone for Rc<T> {
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
impl<T: Default> Default for Rc<T> {
#[inline]
fn default() -> Rc<T> {
Rc::new(Default::default())
}
}
impl<T: PartialEq> PartialEq for Rc<T> {
#[inline(always)]
fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
#[inline(always)]
    fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
}
impl<T: Eq> Eq for Rc<T> {}
impl<T: PartialOrd> PartialOrd for Rc<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
#[inline(always)]
fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
#[inline(always)]
fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
#[inline(always)]
fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
|
impl<T: Ord> Ord for Rc<T> {
#[inline]
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
impl<T: fmt::Show> fmt::Show for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
/// Weak reference to a reference-counted box
#[unsafe_no_drop_flag]
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Weak<T> {
/// Upgrade a weak reference to a strong reference
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare })
}
}
}
#[unsafe_destructor]
impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to
// zero if all the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
#[unstable]
impl<T> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
#[doc(hidden)]
trait RcBoxPtr<T> {
fn inner<'a>(&'a self) -> &'a RcBox<T>;
#[inline]
fn strong(&self) -> uint { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
#[inline]
fn weak(&self) -> uint { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
#[cfg(test)]
#[allow(experimental)]
mod tests {
use super::{Rc, Weak};
use std::cell::RefCell;
use std::option::{Option, Some, None};
use std::mem::drop;
use std::clone::Clone;
#[test]
fn test_clone() {
let x = Rc::new(RefCell::new(5i));
let y = x.clone();
*x.borrow_mut() = 20;
assert_eq!(*y.borrow(), 20);
}
#[test]
fn test_simple() {
let x = Rc::new(5i);
assert_eq!(*x, 5);
}
#[test]
fn test_simple_clone() {
let x = Rc::new(5i);
let y = x.clone();
assert_eq!(*x, 5);
assert_eq!(*y, 5);
}
#[test]
fn test_destructor() {
let x = Rc::new(box 5i);
assert_eq!(**x, 5);
}
#[test]
fn test_live() {
let x = Rc::new(5i);
let y = x.downgrade();
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5i);
let y = x.downgrade();
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn gc_inside() {
// see issue #11532
use std::gc::GC;
let a = Rc::new(RefCell::new(box(GC) 1i));
assert!(a.try_borrow_mut().is_some());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: RefCell<Option<Weak<Cycle>>>
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
let b = a.clone().downgrade();
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn test_cowrc_clone_make_unique() {
let mut cow0 = Rc::new(75u);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *cow0.make_unique());
assert!(75 == *cow1.make_unique());
assert!(75 == *cow2.make_unique());
*cow0.make_unique() += 1;
*cow1.make_unique() += 2;
*cow2.make_unique() += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
}
#[test]
fn test_cowrc_clone_unique2() {
let mut cow0 = Rc::new(75u);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75u);
let cow1_weak = cow0.downgrade();
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
}
|
#[inline(always)]
fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
}
|
random_line_split
|
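For comparison, a small usage sketch against today's std::rc API, which exposes the counts that the RcBoxPtr trait above manipulates directly; note that std's Rc::weak_count does not report the implicit weak reference that the implementation above initializes `weak` to 1 for:

```rust
// Observing strong/weak counts and upgrade failure with modern std::rc.
use std::rc::Rc;

fn main() {
    let strong = Rc::new(5);
    let weak = Rc::downgrade(&strong);
    assert_eq!(Rc::strong_count(&strong), 1);
    assert_eq!(Rc::weak_count(&strong), 1); // excludes the implicit weak
    assert_eq!(*weak.upgrade().unwrap(), 5); // value still alive
    drop(strong);
    assert!(weak.upgrade().is_none()); // freed once the strong count hit zero
}
```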
rc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Task-local reference-counted boxes (`Rc` type)
The `Rc` type provides shared ownership of an immutable value. Destruction is
deterministic, and will occur as soon as the last owner is gone. It is marked
as non-sendable because it avoids the overhead of atomic reference counting.
The `downgrade` method can be used to create a non-owning `Weak` pointer to the
box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will return
`None` if the value has already been freed.
For example, a tree with parent pointers can be represented by putting the
nodes behind strong `Rc` pointers, and then storing the parent pointers as
`Weak` pointers.
## Examples
Consider a scenario where a set of Gadgets are owned by a given Owner. We want
to have our Gadgets point to their Owner. We can't do this with unique
ownership, because more than one gadget may belong to the same Owner. Rc
allows us to share an Owner between multiple Gadgets, and have the Owner kept
alive as long as any Gadget points at it.
```rust
use std::rc::Rc;
struct Owner {
name: String
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner.
let gadget_owner : Rc<Owner> = Rc::new(
Owner { name: String::from_str("Gadget Man") }
);
// Create Gadgets belonging to gadget_owner. To increment the reference
// count we clone the Rc object.
let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() };
let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() };
drop(gadget_owner);
// Despite dropping gadget_owner, we're still able to print out the name of
// the Owner of the Gadgets. This is because we've only dropped the
// reference count object, not the Owner it wraps. As long as there are
// other Rc objects pointing at the same Owner, it will stay alive. Notice
// that the Rc wrapper around Gadget.owner gets automatically dereferenced
// for us.
println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
// At the end of the method, gadget1 and gadget2 get destroyed, and with
// them the last counted references to our Owner. Gadget Man now gets
// destroyed as well.
}
```
If our requirements change, and we also need to be able to traverse from
Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget
introduces a cycle between the objects. This means that their reference counts
can never reach 0, and the objects will stay alive: a memory leak. In order to
get around this, we can use `Weak` pointers. These are reference counted
pointers that don't keep an object alive if there are no normal `Rc` (or
*strong*) pointers left.
Rust actually makes it somewhat difficult to produce this loop in the first
place: in order to end up with two objects that point at each other, one of
them needs to be mutable. This is problematic because Rc enforces memory
safety by only giving out shared references to the object it wraps, and these
don't allow direct mutation. We need to wrap the part of the object we wish to
mutate in a `RefCell`, which provides *interior mutability*: a method to
achieve mutability through a shared reference. `RefCell` enforces Rust's
borrowing rules at runtime. Read the `Cell` documentation for more details on
interior mutability.
```rust
use std::rc::Rc;
use std::rc::Weak;
use std::cell::RefCell;
struct Owner {
name: String,
gadgets: RefCell<Vec<Weak<Gadget>>>
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner. Note the fact that we've put the
// Owner's vector of Gadgets inside a RefCell so that we can mutate it
// through a shared reference.
let gadget_owner : Rc<Owner> = Rc::new(
Owner {
name: "Gadget Man".to_string(),
gadgets: RefCell::new(Vec::new())
}
);
// Create Gadgets belonging to gadget_owner as before.
let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()});
let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()});
// Add the Gadgets to their Owner. To do this we mutably borrow from
// the RefCell holding the Owner's Gadgets.
gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade());
gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade());
// Iterate over our Gadgets, printing their details out
for gadget_opt in gadget_owner.gadgets.borrow().iter() {
// gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee
// that their object is still alive, we need to call upgrade() on them
// to turn them into a strong reference. This returns an Option, which
// contains a reference to our object if it still exists.
let gadget = gadget_opt.upgrade().unwrap();
println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
}
// At the end of the method, gadget_owner, gadget1 and gadget2 get
// destroyed. There are now no strong (Rc) references to the gadgets.
// Once they get destroyed, the Gadgets get destroyed. This zeroes the
// reference count on Gadget Man, so he gets destroyed as well.
}
```
*/
use core::mem::transmute;
use core::cell::Cell;
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::kinds::marker;
use core::ops::{Deref, Drop};
use core::option::{Option, Some, None};
use core::ptr;
use core::ptr::RawPtr;
use core::mem::{min_align_of, size_of};
use core::fmt;
use heap::deallocate;
struct RcBox<T> {
value: T,
strong: Cell<uint>,
weak: Cell<uint>
}
/// Immutable reference counted pointer type
#[unsafe_no_drop_flag]
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Rc<T> {
/// Construct a new reference-counted box
pub fn new(value: T) -> Rc<T> {
unsafe {
Rc {
// there is an implicit weak pointer owned by all the
// strong pointers, which ensures that the weak
// destructor never frees the allocation while the
// strong destructor is running, even if the weak
// pointer is stored inside the strong one.
_ptr: transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
}),
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
}
impl<T> Rc<T> {
/// Downgrade the reference-counted pointer to a weak reference
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak {
_ptr: self._ptr,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
impl<T: Clone> Rc<T> {
/// Acquires a mutable pointer to the inner contents by guaranteeing that
/// the reference count is one (no sharing is possible).
///
/// This is also referred to as a copy-on-write operation because the inner
/// data is cloned if the reference count is greater than one.
#[inline]
#[experimental]
pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
// Note that we hold a strong reference, which also counts as
// a weak reference, so we only clone if there is an
// additional reference of either kind.
        if self.strong() != 1 || self.weak() != 1 {
*self = Rc::new(self.deref().clone())
}
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Rc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
let inner = unsafe { &mut *self._ptr };
&mut inner.value
}
}
impl<T> Deref<T> for Rc<T> {
/// Borrow the value contained in the reference-counted box
#[inline(always)]
fn deref<'a>(&'a self) -> &'a T {
&self.inner().value
}
}
#[unsafe_destructor]
impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(self.deref()); // destroy the contained object
// remove the implicit "strong weak" pointer now
// that we've destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
}
#[unstable]
impl<T> Clone for Rc<T> {
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
impl<T: Default> Default for Rc<T> {
#[inline]
fn default() -> Rc<T> {
Rc::new(Default::default())
}
}
impl<T: PartialEq> PartialEq for Rc<T> {
#[inline(always)]
fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
#[inline(always)]
    fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
}
impl<T: Eq> Eq for Rc<T> {}
impl<T: PartialOrd> PartialOrd for Rc<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
#[inline(always)]
fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
#[inline(always)]
fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
#[inline(always)]
fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
#[inline(always)]
fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
}
impl<T: Ord> Ord for Rc<T> {
#[inline]
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
impl<T: fmt::Show> fmt::Show for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
/// Weak reference to a reference-counted box
#[unsafe_no_drop_flag]
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Weak<T> {
/// Upgrade a weak reference to a strong reference
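    ///
    /// A minimal sketch (mirroring `test_live`/`test_dead` below):
    ///
    /// ```rust
    /// use std::rc::Rc;
    ///
    /// let strong = Rc::new(5i);
    /// let weak = strong.downgrade();
    /// assert!(weak.upgrade().is_some()); // value is still alive
    /// drop(strong);
    /// assert!(weak.upgrade().is_none()); // last strong owner is gone
    /// ```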
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare })
}
}
}
#[unsafe_destructor]
impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to
// zero if all the strong pointers have disappeared.
if self.weak() == 0
|
}
}
}
}
#[unstable]
impl<T> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
#[doc(hidden)]
trait RcBoxPtr<T> {
fn inner<'a>(&'a self) -> &'a RcBox<T>;
#[inline]
fn strong(&self) -> uint { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
#[inline]
fn weak(&self) -> uint { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
#[cfg(test)]
#[allow(experimental)]
mod tests {
use super::{Rc, Weak};
use std::cell::RefCell;
use std::option::{Option, Some, None};
use std::mem::drop;
use std::clone::Clone;
#[test]
fn test_clone() {
let x = Rc::new(RefCell::new(5i));
let y = x.clone();
*x.borrow_mut() = 20;
assert_eq!(*y.borrow(), 20);
}
#[test]
fn test_simple() {
let x = Rc::new(5i);
assert_eq!(*x, 5);
}
#[test]
fn test_simple_clone() {
let x = Rc::new(5i);
let y = x.clone();
assert_eq!(*x, 5);
assert_eq!(*y, 5);
}
#[test]
fn test_destructor() {
let x = Rc::new(box 5i);
assert_eq!(**x, 5);
}
#[test]
fn test_live() {
let x = Rc::new(5i);
let y = x.downgrade();
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5i);
let y = x.downgrade();
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn gc_inside() {
// see issue #11532
use std::gc::GC;
let a = Rc::new(RefCell::new(box(GC) 1i));
assert!(a.try_borrow_mut().is_some());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: RefCell<Option<Weak<Cycle>>>
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
let b = a.clone().downgrade();
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn test_cowrc_clone_make_unique() {
let mut cow0 = Rc::new(75u);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *cow0.make_unique());
assert!(75 == *cow1.make_unique());
assert!(75 == *cow2.make_unique());
*cow0.make_unique() += 1;
*cow1.make_unique() += 2;
*cow2.make_unique() += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
}
#[test]
fn test_cowrc_clone_unique2() {
let mut cow0 = Rc::new(75u);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75u);
let cow1_weak = cow0.downgrade();
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
}
|
{
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
|
conditional_block
|
rc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Task-local reference-counted boxes (`Rc` type)
The `Rc` type provides shared ownership of an immutable value. Destruction is
deterministic, and will occur as soon as the last owner is gone. It is marked
as non-sendable because it avoids the overhead of atomic reference counting.
The `downgrade` method can be used to create a non-owning `Weak` pointer to the
box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will return
`None` if the value has already been freed.
For example, a tree with parent pointers can be represented by putting the
nodes behind strong `Rc` pointers, and then storing the parent pointers as
`Weak` pointers.
## Examples
Consider a scenario where a set of Gadgets are owned by a given Owner. We want
to have our Gadgets point to their Owner. We can't do this with unique
ownership, because more than one gadget may belong to the same Owner. Rc
allows us to share an Owner between multiple Gadgets, and have the Owner kept
alive as long as any Gadget points at it.
```rust
use std::rc::Rc;
struct Owner {
name: String
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner.
let gadget_owner : Rc<Owner> = Rc::new(
Owner { name: String::from_str("Gadget Man") }
);
// Create Gadgets belonging to gadget_owner. To increment the reference
// count we clone the Rc object.
let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() };
let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() };
drop(gadget_owner);
// Despite dropping gadget_owner, we're still able to print out the name of
// the Owner of the Gadgets. This is because we've only dropped the
// reference count object, not the Owner it wraps. As long as there are
// other Rc objects pointing at the same Owner, it will stay alive. Notice
// that the Rc wrapper around Gadget.owner gets automatically dereferenced
// for us.
println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
// At the end of the method, gadget1 and gadget2 get destroyed, and with
// them the last counted references to our Owner. Gadget Man now gets
// destroyed as well.
}
```
If our requirements change, and we also need to be able to traverse from
Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget
introduces a cycle between the objects. This means that their reference counts
can never reach 0, and the objects will stay alive: a memory leak. In order to
get around this, we can use `Weak` pointers. These are reference counted
pointers that don't keep an object alive if there are no normal `Rc` (or
*strong*) pointers left.
Rust actually makes it somewhat difficult to produce this loop in the first
place: in order to end up with two objects that point at each other, one of
them needs to be mutable. This is problematic because Rc enforces memory
safety by only giving out shared references to the object it wraps, and these
don't allow direct mutation. We need to wrap the part of the object we wish to
mutate in a `RefCell`, which provides *interior mutability*: a method to
achieve mutability through a shared reference. `RefCell` enforces Rust's
borrowing rules at runtime. Read the `Cell` documentation for more details on
interior mutability.
```rust
use std::rc::Rc;
use std::rc::Weak;
use std::cell::RefCell;
struct Owner {
name: String,
gadgets: RefCell<Vec<Weak<Gadget>>>
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner. Note the fact that we've put the
// Owner's vector of Gadgets inside a RefCell so that we can mutate it
// through a shared reference.
let gadget_owner : Rc<Owner> = Rc::new(
Owner {
name: "Gadget Man".to_string(),
gadgets: RefCell::new(Vec::new())
}
);
// Create Gadgets belonging to gadget_owner as before.
let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()});
let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()});
// Add the Gadgets to their Owner. To do this we mutably borrow from
// the RefCell holding the Owner's Gadgets.
gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade());
gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade());
// Iterate over our Gadgets, printing their details out
for gadget_opt in gadget_owner.gadgets.borrow().iter() {
// gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee
// that their object is still alive, we need to call upgrade() on them
// to turn them into a strong reference. This returns an Option, which
// contains a reference to our object if it still exists.
let gadget = gadget_opt.upgrade().unwrap();
println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
}
// At the end of the method, gadget_owner, gadget1 and gadget2 get
// destroyed. There are now no strong (Rc) references to the gadgets.
// Once they get destroyed, the Gadgets get destroyed. This zeroes the
// reference count on Gadget Man, so he gets destroyed as well.
}
```
*/
use core::mem::transmute;
use core::cell::Cell;
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::kinds::marker;
use core::ops::{Deref, Drop};
use core::option::{Option, Some, None};
use core::ptr;
use core::ptr::RawPtr;
use core::mem::{min_align_of, size_of};
use core::fmt;
use heap::deallocate;
struct RcBox<T> {
value: T,
strong: Cell<uint>,
weak: Cell<uint>
}
/// Immutable reference counted pointer type
#[unsafe_no_drop_flag]
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Rc<T> {
/// Construct a new reference-counted box
pub fn new(value: T) -> Rc<T> {
unsafe {
Rc {
// there is an implicit weak pointer owned by all the
// strong pointers, which ensures that the weak
// destructor never frees the allocation while the
// strong destructor is running, even if the weak
// pointer is stored inside the strong one.
_ptr: transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
}),
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
}
impl<T> Rc<T> {
/// Downgrade the reference-counted pointer to a weak reference
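    ///
    /// A minimal sketch (illustrative):
    ///
    /// ```rust
    /// use std::rc::Rc;
    ///
    /// let strong = Rc::new(42i);
    /// let weak = strong.downgrade(); // bumps the weak count only
    /// assert!(*weak.upgrade().unwrap() == 42);
    /// ```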
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak {
_ptr: self._ptr,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
impl<T: Clone> Rc<T> {
    /// Acquires a mutable reference to the inner contents by guaranteeing that
/// the reference count is one (no sharing is possible).
///
/// This is also referred to as a copy-on-write operation because the inner
/// data is cloned if the reference count is greater than one.
#[inline]
#[experimental]
pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
// Note that we hold a strong reference, which also counts as
// a weak reference, so we only clone if there is an
// additional reference of either kind.
        if self.strong() != 1 || self.weak() != 1 {
*self = Rc::new(self.deref().clone())
}
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Rc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
let inner = unsafe { &mut *self._ptr };
&mut inner.value
}
}
impl<T> Deref<T> for Rc<T> {
/// Borrow the value contained in the reference-counted box
#[inline(always)]
fn deref<'a>(&'a self) -> &'a T {
&self.inner().value
}
}
#[unsafe_destructor]
impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(self.deref()); // destroy the contained object
// remove the implicit "strong weak" pointer now
// that we've destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
}
#[unstable]
impl<T> Clone for Rc<T> {
#[inline]
fn
|
(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
impl<T: Default> Default for Rc<T> {
#[inline]
fn default() -> Rc<T> {
Rc::new(Default::default())
}
}
impl<T: PartialEq> PartialEq for Rc<T> {
#[inline(always)]
fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
#[inline(always)]
    fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
}
impl<T: Eq> Eq for Rc<T> {}
impl<T: PartialOrd> PartialOrd for Rc<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
#[inline(always)]
fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
#[inline(always)]
fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
#[inline(always)]
fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
#[inline(always)]
fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
}
impl<T: Ord> Ord for Rc<T> {
#[inline]
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
impl<T: fmt::Show> fmt::Show for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
/// Weak reference to a reference-counted box
#[unsafe_no_drop_flag]
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Weak<T> {
/// Upgrade a weak reference to a strong reference
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare })
}
}
}
#[unsafe_destructor]
impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to
// zero if all the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
#[unstable]
impl<T> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
#[doc(hidden)]
trait RcBoxPtr<T> {
fn inner<'a>(&'a self) -> &'a RcBox<T>;
#[inline]
fn strong(&self) -> uint { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
#[inline]
fn weak(&self) -> uint { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
#[cfg(test)]
#[allow(experimental)]
mod tests {
use super::{Rc, Weak};
use std::cell::RefCell;
use std::option::{Option, Some, None};
use std::mem::drop;
use std::clone::Clone;
#[test]
fn test_clone() {
let x = Rc::new(RefCell::new(5i));
let y = x.clone();
*x.borrow_mut() = 20;
assert_eq!(*y.borrow(), 20);
}
#[test]
fn test_simple() {
let x = Rc::new(5i);
assert_eq!(*x, 5);
}
#[test]
fn test_simple_clone() {
let x = Rc::new(5i);
let y = x.clone();
assert_eq!(*x, 5);
assert_eq!(*y, 5);
}
#[test]
fn test_destructor() {
let x = Rc::new(box 5i);
assert_eq!(**x, 5);
}
#[test]
fn test_live() {
let x = Rc::new(5i);
let y = x.downgrade();
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5i);
let y = x.downgrade();
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn gc_inside() {
// see issue #11532
use std::gc::GC;
let a = Rc::new(RefCell::new(box(GC) 1i));
assert!(a.try_borrow_mut().is_some());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: RefCell<Option<Weak<Cycle>>>
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
let b = a.clone().downgrade();
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn test_cowrc_clone_make_unique() {
let mut cow0 = Rc::new(75u);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *cow0.make_unique());
assert!(75 == *cow1.make_unique());
assert!(75 == *cow2.make_unique());
*cow0.make_unique() += 1;
*cow1.make_unique() += 2;
*cow2.make_unique() += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
}
#[test]
fn test_cowrc_clone_unique2() {
let mut cow0 = Rc::new(75u);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75u);
let cow1_weak = cow0.downgrade();
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
}
|
clone
|
identifier_name
|
rc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! Task-local reference-counted boxes (`Rc` type)
The `Rc` type provides shared ownership of an immutable value. Destruction is
deterministic, and will occur as soon as the last owner is gone. It is marked
as non-sendable because it avoids the overhead of atomic reference counting.
The `downgrade` method can be used to create a non-owning `Weak` pointer to the
box. A `Weak` pointer can be upgraded to an `Rc` pointer, but will return
`None` if the value has already been freed.
For example, a tree with parent pointers can be represented by putting the
nodes behind strong `Rc` pointers, and then storing the parent pointers as
`Weak` pointers.
## Examples
Consider a scenario where a set of Gadgets are owned by a given Owner. We want
to have our Gadgets point to their Owner. We can't do this with unique
ownership, because more than one gadget may belong to the same Owner. Rc
allows us to share an Owner between multiple Gadgets, and have the Owner kept
alive as long as any Gadget points at it.
```rust
use std::rc::Rc;
struct Owner {
name: String
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner.
let gadget_owner : Rc<Owner> = Rc::new(
Owner { name: String::from_str("Gadget Man") }
);
// Create Gadgets belonging to gadget_owner. To increment the reference
// count we clone the Rc object.
let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() };
let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() };
drop(gadget_owner);
// Despite dropping gadget_owner, we're still able to print out the name of
// the Owner of the Gadgets. This is because we've only dropped the
// reference count object, not the Owner it wraps. As long as there are
// other Rc objects pointing at the same Owner, it will stay alive. Notice
// that the Rc wrapper around Gadget.owner gets automatically dereferenced
// for us.
println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
// At the end of the method, gadget1 and gadget2 get destroyed, and with
// them the last counted references to our Owner. Gadget Man now gets
// destroyed as well.
}
```
If our requirements change, and we also need to be able to traverse from
Owner->Gadget, we will run into problems: an Rc pointer from Owner->Gadget
introduces a cycle between the objects. This means that their reference counts
can never reach 0, and the objects will stay alive: a memory leak. In order to
get around this, we can use `Weak` pointers. These are reference counted
pointers that don't keep an object alive if there are no normal `Rc` (or
*strong*) pointers left.
Rust actually makes it somewhat difficult to produce this loop in the first
place: in order to end up with two objects that point at each other, one of
them needs to be mutable. This is problematic because Rc enforces memory
safety by only giving out shared references to the object it wraps, and these
don't allow direct mutation. We need to wrap the part of the object we wish to
mutate in a `RefCell`, which provides *interior mutability*: a method to
achieve mutability through a shared reference. `RefCell` enforces Rust's
borrowing rules at runtime. Read the `Cell` documentation for more details on
interior mutability.
```rust
use std::rc::Rc;
use std::rc::Weak;
use std::cell::RefCell;
struct Owner {
name: String,
gadgets: RefCell<Vec<Weak<Gadget>>>
//...other fields
}
struct Gadget {
id: int,
owner: Rc<Owner>
//...other fields
}
fn main() {
// Create a reference counted Owner. Note the fact that we've put the
// Owner's vector of Gadgets inside a RefCell so that we can mutate it
// through a shared reference.
let gadget_owner : Rc<Owner> = Rc::new(
Owner {
name: "Gadget Man".to_string(),
gadgets: RefCell::new(Vec::new())
}
);
// Create Gadgets belonging to gadget_owner as before.
let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()});
let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()});
// Add the Gadgets to their Owner. To do this we mutably borrow from
// the RefCell holding the Owner's Gadgets.
gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade());
gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade());
// Iterate over our Gadgets, printing their details out
for gadget_opt in gadget_owner.gadgets.borrow().iter() {
// gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee
// that their object is still alive, we need to call upgrade() on them
// to turn them into a strong reference. This returns an Option, which
// contains a reference to our object if it still exists.
let gadget = gadget_opt.upgrade().unwrap();
println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
}
// At the end of the method, gadget_owner, gadget1 and gadget2 get
// destroyed. There are now no strong (Rc) references to the gadgets.
// Once they get destroyed, the Gadgets get destroyed. This zeroes the
// reference count on Gadget Man, so he gets destroyed as well.
}
```
*/
use core::mem::transmute;
use core::cell::Cell;
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::kinds::marker;
use core::ops::{Deref, Drop};
use core::option::{Option, Some, None};
use core::ptr;
use core::ptr::RawPtr;
use core::mem::{min_align_of, size_of};
use core::fmt;
use heap::deallocate;
struct RcBox<T> {
value: T,
strong: Cell<uint>,
weak: Cell<uint>
}
/// Immutable reference counted pointer type
#[unsafe_no_drop_flag]
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Rc<T> {
/// Construct a new reference-counted box
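    ///
    /// A minimal sketch (as in `test_simple` below):
    ///
    /// ```rust
    /// use std::rc::Rc;
    ///
    /// let five = Rc::new(5i);
    /// assert!(*five == 5);
    /// ```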
pub fn new(value: T) -> Rc<T> {
unsafe {
Rc {
// there is an implicit weak pointer owned by all the
// strong pointers, which ensures that the weak
// destructor never frees the allocation while the
// strong destructor is running, even if the weak
// pointer is stored inside the strong one.
_ptr: transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
}),
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
}
impl<T> Rc<T> {
/// Downgrade the reference-counted pointer to a weak reference
pub fn downgrade(&self) -> Weak<T> {
self.inc_weak();
Weak {
_ptr: self._ptr,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
}
}
impl<T: Clone> Rc<T> {
    /// Acquires a mutable reference to the inner contents by guaranteeing that
/// the reference count is one (no sharing is possible).
///
/// This is also referred to as a copy-on-write operation because the inner
/// data is cloned if the reference count is greater than one.
#[inline]
#[experimental]
pub fn make_unique<'a>(&'a mut self) -> &'a mut T {
// Note that we hold a strong reference, which also counts as
// a weak reference, so we only clone if there is an
// additional reference of either kind.
        if self.strong() != 1 || self.weak() != 1 {
*self = Rc::new(self.deref().clone())
}
// This unsafety is ok because we're guaranteed that the pointer
// returned is the *only* pointer that will ever be returned to T. Our
// reference count is guaranteed to be 1 at this point, and we required
// the Rc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
let inner = unsafe { &mut *self._ptr };
&mut inner.value
}
}
impl<T> Deref<T> for Rc<T> {
/// Borrow the value contained in the reference-counted box
#[inline(always)]
fn deref<'a>(&'a self) -> &'a T {
&self.inner().value
}
}
#[unsafe_destructor]
impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(self.deref()); // destroy the contained object
// remove the implicit "strong weak" pointer now
// that we've destroyed the contents.
self.dec_weak();
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
}
#[unstable]
impl<T> Clone for Rc<T> {
#[inline]
fn clone(&self) -> Rc<T> {
self.inc_strong();
Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
impl<T: Default> Default for Rc<T> {
#[inline]
fn default() -> Rc<T> {
Rc::new(Default::default())
}
}
impl<T: PartialEq> PartialEq for Rc<T> {
#[inline(always)]
fn eq(&self, other: &Rc<T>) -> bool { **self == **other }
#[inline(always)]
    fn ne(&self, other: &Rc<T>) -> bool { **self != **other }
}
impl<T: Eq> Eq for Rc<T> {}
impl<T: PartialOrd> PartialOrd for Rc<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
#[inline(always)]
fn lt(&self, other: &Rc<T>) -> bool { **self < **other }
#[inline(always)]
fn le(&self, other: &Rc<T>) -> bool { **self <= **other }
#[inline(always)]
fn gt(&self, other: &Rc<T>) -> bool { **self > **other }
#[inline(always)]
fn ge(&self, other: &Rc<T>) -> bool { **self >= **other }
}
impl<T: Ord> Ord for Rc<T> {
#[inline]
fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) }
}
impl<T: fmt::Show> fmt::Show for Rc<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
/// Weak reference to a reference-counted box
#[unsafe_no_drop_flag]
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_ptr: *mut RcBox<T>,
_nosend: marker::NoSend,
_noshare: marker::NoShare
}
impl<T> Weak<T> {
/// Upgrade a weak reference to a strong reference
pub fn upgrade(&self) -> Option<Rc<T>> {
if self.strong() == 0 {
None
} else {
self.inc_strong();
Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare })
}
}
}
#[unsafe_destructor]
impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
            if !self._ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to
// zero if all the strong pointers have disappeared.
if self.weak() == 0 {
deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
}
}
}
#[unstable]
impl<T> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Weak<T> {
self.inc_weak();
Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoShare }
}
}
#[doc(hidden)]
trait RcBoxPtr<T> {
fn inner<'a>(&'a self) -> &'a RcBox<T>;
#[inline]
fn strong(&self) -> uint { self.inner().strong.get() }
#[inline]
fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
#[inline]
fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
#[inline]
fn weak(&self) -> uint { self.inner().weak.get() }
#[inline]
fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
#[inline]
fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
fn inner<'a>(&'a self) -> &'a RcBox<T> { unsafe { &(*self._ptr) } }
}
#[cfg(test)]
#[allow(experimental)]
mod tests {
use super::{Rc, Weak};
use std::cell::RefCell;
use std::option::{Option, Some, None};
use std::mem::drop;
use std::clone::Clone;
#[test]
fn test_clone() {
let x = Rc::new(RefCell::new(5i));
let y = x.clone();
*x.borrow_mut() = 20;
assert_eq!(*y.borrow(), 20);
}
#[test]
fn test_simple() {
let x = Rc::new(5i);
assert_eq!(*x, 5);
}
#[test]
fn test_simple_clone() {
let x = Rc::new(5i);
let y = x.clone();
assert_eq!(*x, 5);
assert_eq!(*y, 5);
}
#[test]
fn test_destructor() {
let x = Rc::new(box 5i);
assert_eq!(**x, 5);
}
#[test]
fn test_live() {
let x = Rc::new(5i);
let y = x.downgrade();
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5i);
let y = x.downgrade();
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn gc_inside()
|
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: RefCell<Option<Weak<Cycle>>>
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
let b = a.clone().downgrade();
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn test_cowrc_clone_make_unique() {
let mut cow0 = Rc::new(75u);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *cow0.make_unique());
assert!(75 == *cow1.make_unique());
assert!(75 == *cow2.make_unique());
*cow0.make_unique() += 1;
*cow1.make_unique() += 2;
*cow2.make_unique() += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
        assert!(*cow1 != *cow2);
}
#[test]
fn test_cowrc_clone_unique2() {
let mut cow0 = Rc::new(75u);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
        assert!(*cow0 != *cow1);
        assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75u);
let cow1_weak = cow0.downgrade();
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*cow0.make_unique() += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
}
|
{
// see issue #11532
use std::gc::GC;
let a = Rc::new(RefCell::new(box(GC) 1i));
assert!(a.try_borrow_mut().is_some());
}
|
identifier_body
|
main.rs
|
/// Returns true if the string is a palindrome
fn
|
(string: &str) -> bool {
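    // Note: len() counts bytes, not characters, so for multibyte UTF-8
    // input each half may be asked for more characters than exist; the
    // iterators simply stop early and the comparison stays correct,
    // just with some redundant work.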
// The first part of the string
let forward = string.chars().take(string.len() / 2);
// The second part of the string in reverse order
let reverse = string.chars().rev().take(string.len() / 2);
// We group the two parts of the string in tuples
let mut both_directions = forward.zip(reverse);
    // The word is a palindrome if each tuple contains the same
    // character twice
both_directions.all(|(a, b)| a == b)
}
fn main() {
let test_strings = ["nope", "eevee", "lalala", "rust", "lalalal"];
for &string in &test_strings {
println!("{}: {}", string, palindrome(string));
}
}
#[test]
fn test_palindromes() {
let palindromes = ["eevee", "lalalal", "オオオオ", "", "anna"];
let non_palindromes = ["nope", "lalala", "car", "rain", "house", "computer", "rust"];
assert!(palindromes.iter().all(|&s| palindrome(s)));
    assert!(non_palindromes.iter().all(|&s| !palindrome(s)));
}
|
palindrome
|
identifier_name
|
main.rs
|
/// Returns true if the string is a palindrome
fn palindrome(string: &str) -> bool {
// The first part of the string
let forward = string.chars().take(string.len() / 2);
// The second part of the string in reverse order
let reverse = string.chars().rev().take(string.len() / 2);
// We group the two parts of the string in tuples
let mut both_directions = forward.zip(reverse);
    // The word is a palindrome if each tuple contains the same
    // character twice
both_directions.all(|(a, b)| a == b)
}
fn main()
|
#[test]
fn test_palindromes() {
let palindromes = ["eevee", "lalalal", "オオオオ", "", "anna"];
let non_palindromes = ["nope", "lalala", "car", "rain", "house", "computer", "rust"];
assert!(palindromes.iter().all(|&s| palindrome(s)));
    assert!(non_palindromes.iter().all(|&s| !palindrome(s)));
}
|
{
let test_strings = ["nope", "eevee", "lalala", "rust", "lalalal"];
for &string in &test_strings {
println!("{}: {}", string, palindrome(string));
}
}
|
identifier_body
|
main.rs
|
/// Returns true if the string is a palindrome
fn palindrome(string: &str) -> bool {
// The first part of the string
let forward = string.chars().take(string.len() / 2);
// The second part of the string in reverse order
let reverse = string.chars().rev().take(string.len() / 2);
// We group the two parts of the string in tuples
let mut both_directions = forward.zip(reverse);
    // The word is a palindrome if each tuple contains the same
    // character twice
both_directions.all(|(a, b)| a == b)
}
fn main() {
let test_strings = ["nope", "eevee", "lalala", "rust", "lalalal"];
for &string in &test_strings {
println!("{}: {}", string, palindrome(string));
}
}
#[test]
fn test_palindromes() {
|
}
|
let palindromes = ["eevee", "lalalal", "オオオオ", "", "anna"];
let non_palindromes = ["nope", "lalala", "car", "rain", "house", "computer", "rust"];
assert!(palindromes.iter().all(|&s| palindrome(s)));
assert!(non_palindromes.iter().all(|&s| !palindrome(s)));
|
random_line_split
|
lib.rs
|
#![feature(pub_restricted, slice_patterns)]
#[macro_use]
extern crate gfx;
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate gfx_window_glutin;
extern crate glutin;
extern crate rusttype;
extern crate conrod;
extern crate cgmath;
extern crate image;
extern crate quickersort;
extern crate rayon;
pub mod combined;
pub mod specialized;
pub mod camera;
pub mod layer;
pub mod color;
pub mod texture;
pub mod sprites;
pub mod lights;
pub mod types;
#[doc(hidden)]
pub mod utils;
pub use camera::Camera;
pub use color::{Color, NormalizedColor};
pub use texture::TextureBind;
use types::*;
use texture::TextureBinds;
use glutin::{WindowBuilder, Window};
pub struct Graphics {
device: Device,
factory: Factory,
encoder: Encoder,
output_color: OutputColor,
output_depth: OutputDepth,
texture_binds: TextureBinds,
}
impl Graphics {
pub fn new(builder: WindowBuilder) -> (Window, Self) {
let (window, device, mut factory, color, depth) =
gfx_window_glutin::init::<ColorFormat, DepthFormat>(
builder.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2))));
let encoder = factory.create_command_buffer().into();
let graphics = Graphics {
device: device,
factory: factory,
encoder: encoder,
output_color: color,
output_depth: depth,
texture_binds: TextureBinds::new(),
};
(window, graphics)
}
pub fn resize(&mut self, window: &Window) {
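        // Recreate the raw color/depth render targets so they match the
        // window's new pixel size and multisampling level.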
use gfx_core::format::Formatted;
use gfx_core::memory::Typed;
let (w, h) = window.get_inner_size_pixels().unwrap();
let aa = window.get_pixel_format().multisampling
.unwrap_or(0) as gfx::texture::NumSamples;
let dim = (w as gfx::texture::Size, h as gfx::texture::Size, 1, aa.into());
let (color, depth) = gfx_device_gl::create_main_targets_raw(
dim,
ColorFormat::get_format().0,
DepthFormat::get_format().0);
self.output_color = Typed::new(color);
self.output_depth = Typed::new(depth);
}
pub fn draw<'a>(&'a mut self) -> Frame<'a> {
Frame::new(self)
}
pub fn bind_textures(&mut self, color: TextureView<ColorFormat>,
normal: TextureView<NormalFormat>) -> TextureBind {
self.texture_binds.insert(texture::Bind {
color: color,
normal: normal,
})
}
pub fn
|
(&mut self, bind: TextureBind, color: TextureView<ColorFormat>) {
self.texture_binds.get_mut(bind).color = color;
}
pub fn update_bound_normal(&mut self, bind: TextureBind, normal: TextureView<NormalFormat>) {
self.texture_binds.get_mut(bind).normal = normal;
}
pub fn unbind_textures(&mut self, bind: TextureBind) {
self.texture_binds.remove(bind);
}
pub fn load_texture<F>(&mut self, w: u16, h: u16, data: &[u8]) -> TextureView<F>
where F: gfx::format::TextureFormat
{
use gfx::traits::*;
let aa_mode = gfx::texture::AaMode::Single;
let kind = gfx::texture::Kind::D2(w, h, aa_mode);
let (_, view) = self.factory.create_texture_immutable_u8::<F>(kind, &[data]).unwrap();
view
}
pub fn load_texture_from_image<F>(&mut self, path: &str) -> TextureView<F>
where F: gfx::format::TextureFormat
{
let image = image::open(path).unwrap().to_rgba();
let (w, h) = image.dimensions();
self.load_texture::<F>(w as u16, h as u16, &image)
}
pub fn load_white_color(&mut self) -> TextureView<ColorFormat> {
self.load_texture::<ColorFormat>(1, 1, &[255, 255, 255, 255])
}
pub fn load_flat_normal(&mut self) -> TextureView<NormalFormat> {
self.load_texture::<NormalFormat>(1, 1, &[128, 128, 255, 255])
}
}
pub struct Frame<'a> {
pub(crate) graphics: &'a mut Graphics,
pub(crate) should_flush: bool,
}
impl<'a> Frame<'a> {
fn new(graphics: &'a mut Graphics) -> Self {
Frame {
graphics: graphics,
should_flush: false,
}
}
pub fn clear(&mut self, color: NormalizedColor) {
self.graphics.encoder.clear(&self.graphics.output_color, color.to_array());
self.should_flush();
}
pub fn should_flush(&mut self) {
self.should_flush = true;
}
pub fn flush(&mut self) {
self.graphics.encoder.flush(&mut self.graphics.device);
self.should_flush = false;
}
pub fn ensure_flushed(&mut self) {
if self.should_flush { self.flush(); }
}
pub fn present(mut self, window: &'a Window) {
use gfx::traits::*;
self.ensure_flushed();
window.swap_buffers().unwrap();
self.graphics.device.cleanup();
}
}
|
update_bound_color
|
identifier_name
|
lib.rs
|
#![feature(pub_restricted, slice_patterns)]
#[macro_use]
extern crate gfx;
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate gfx_window_glutin;
extern crate glutin;
extern crate rusttype;
extern crate conrod;
extern crate cgmath;
extern crate image;
extern crate quickersort;
extern crate rayon;
pub mod combined;
pub mod specialized;
pub mod camera;
pub mod layer;
pub mod color;
pub mod texture;
pub mod sprites;
pub mod lights;
pub mod types;
#[doc(hidden)]
pub mod utils;
pub use camera::Camera;
pub use color::{Color, NormalizedColor};
pub use texture::TextureBind;
use types::*;
use texture::TextureBinds;
use glutin::{WindowBuilder, Window};
pub struct Graphics {
device: Device,
factory: Factory,
encoder: Encoder,
output_color: OutputColor,
output_depth: OutputDepth,
texture_binds: TextureBinds,
}
impl Graphics {
pub fn new(builder: WindowBuilder) -> (Window, Self) {
let (window, device, mut factory, color, depth) =
gfx_window_glutin::init::<ColorFormat, DepthFormat>(
builder.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2))));
let encoder = factory.create_command_buffer().into();
let graphics = Graphics {
device: device,
factory: factory,
encoder: encoder,
output_color: color,
output_depth: depth,
texture_binds: TextureBinds::new(),
};
(window, graphics)
}
pub fn resize(&mut self, window: &Window) {
use gfx_core::format::Formatted;
use gfx_core::memory::Typed;
let (w, h) = window.get_inner_size_pixels().unwrap();
let aa = window.get_pixel_format().multisampling
.unwrap_or(0) as gfx::texture::NumSamples;
let dim = (w as gfx::texture::Size, h as gfx::texture::Size, 1, aa.into());
let (color, depth) = gfx_device_gl::create_main_targets_raw(
dim,
ColorFormat::get_format().0,
DepthFormat::get_format().0);
self.output_color = Typed::new(color);
self.output_depth = Typed::new(depth);
}
pub fn draw<'a>(&'a mut self) -> Frame<'a> {
Frame::new(self)
}
pub fn bind_textures(&mut self, color: TextureView<ColorFormat>,
normal: TextureView<NormalFormat>) -> TextureBind {
self.texture_binds.insert(texture::Bind {
color: color,
normal: normal,
})
}
pub fn update_bound_color(&mut self, bind: TextureBind, color: TextureView<ColorFormat>) {
self.texture_binds.get_mut(bind).color = color;
}
pub fn update_bound_normal(&mut self, bind: TextureBind, normal: TextureView<NormalFormat>) {
self.texture_binds.get_mut(bind).normal = normal;
}
pub fn unbind_textures(&mut self, bind: TextureBind) {
self.texture_binds.remove(bind);
}
pub fn load_texture<F>(&mut self, w: u16, h: u16, data: &[u8]) -> TextureView<F>
where F: gfx::format::TextureFormat
{
use gfx::traits::*;
let aa_mode = gfx::texture::AaMode::Single;
let kind = gfx::texture::Kind::D2(w, h, aa_mode);
let (_, view) = self.factory.create_texture_immutable_u8::<F>(kind, &[data]).unwrap();
view
}
pub fn load_texture_from_image<F>(&mut self, path: &str) -> TextureView<F>
where F: gfx::format::TextureFormat
{
let image = image::open(path).unwrap().to_rgba();
let (w, h) = image.dimensions();
self.load_texture::<F>(w as u16, h as u16, &image)
}
pub fn load_white_color(&mut self) -> TextureView<ColorFormat> {
self.load_texture::<ColorFormat>(1, 1, &[255, 255, 255, 255])
}
pub fn load_flat_normal(&mut self) -> TextureView<NormalFormat> {
self.load_texture::<NormalFormat>(1, 1, &[128, 128, 255, 255])
}
}
pub struct Frame<'a> {
pub(crate) graphics: &'a mut Graphics,
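    // Set once commands are recorded into the encoder, so flushing only
    // happens when there is pending work (see `should_flush`/`ensure_flushed`).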
pub(crate) should_flush: bool,
}
impl<'a> Frame<'a> {
|
should_flush: false,
}
}
pub fn clear(&mut self, color: NormalizedColor) {
self.graphics.encoder.clear(&self.graphics.output_color, color.to_array());
self.should_flush();
}
pub fn should_flush(&mut self) {
self.should_flush = true;
}
pub fn flush(&mut self) {
self.graphics.encoder.flush(&mut self.graphics.device);
self.should_flush = false;
}
pub fn ensure_flushed(&mut self) {
if self.should_flush { self.flush(); }
}
pub fn present(mut self, window: &'a Window) {
use gfx::traits::*;
self.ensure_flushed();
window.swap_buffers().unwrap();
self.graphics.device.cleanup();
}
}
|
fn new(graphics: &'a mut Graphics) -> Self {
Frame {
graphics: graphics,
|
random_line_split
|
lib.rs
|
#![feature(pub_restricted, slice_patterns)]
#[macro_use]
extern crate gfx;
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate gfx_window_glutin;
extern crate glutin;
extern crate rusttype;
extern crate conrod;
extern crate cgmath;
extern crate image;
extern crate quickersort;
extern crate rayon;
pub mod combined;
pub mod specialized;
pub mod camera;
pub mod layer;
pub mod color;
pub mod texture;
pub mod sprites;
pub mod lights;
pub mod types;
#[doc(hidden)]
pub mod utils;
pub use camera::Camera;
pub use color::{Color, NormalizedColor};
pub use texture::TextureBind;
use types::*;
use texture::TextureBinds;
use glutin::{WindowBuilder, Window};
pub struct Graphics {
device: Device,
factory: Factory,
encoder: Encoder,
output_color: OutputColor,
output_depth: OutputDepth,
texture_binds: TextureBinds,
}
impl Graphics {
pub fn new(builder: WindowBuilder) -> (Window, Self) {
let (window, device, mut factory, color, depth) =
gfx_window_glutin::init::<ColorFormat, DepthFormat>(
builder.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2))));
let encoder = factory.create_command_buffer().into();
let graphics = Graphics {
device: device,
factory: factory,
encoder: encoder,
output_color: color,
output_depth: depth,
texture_binds: TextureBinds::new(),
};
(window, graphics)
}
pub fn resize(&mut self, window: &Window) {
use gfx_core::format::Formatted;
use gfx_core::memory::Typed;
let (w, h) = window.get_inner_size_pixels().unwrap();
let aa = window.get_pixel_format().multisampling
.unwrap_or(0) as gfx::texture::NumSamples;
let dim = (w as gfx::texture::Size, h as gfx::texture::Size, 1, aa.into());
let (color, depth) = gfx_device_gl::create_main_targets_raw(
dim,
ColorFormat::get_format().0,
DepthFormat::get_format().0);
self.output_color = Typed::new(color);
self.output_depth = Typed::new(depth);
}
pub fn draw<'a>(&'a mut self) -> Frame<'a> {
Frame::new(self)
}
pub fn bind_textures(&mut self, color: TextureView<ColorFormat>,
normal: TextureView<NormalFormat>) -> TextureBind {
self.texture_binds.insert(texture::Bind {
color: color,
normal: normal,
})
}
pub fn update_bound_color(&mut self, bind: TextureBind, color: TextureView<ColorFormat>) {
self.texture_binds.get_mut(bind).color = color;
}
pub fn update_bound_normal(&mut self, bind: TextureBind, normal: TextureView<NormalFormat>) {
self.texture_binds.get_mut(bind).normal = normal;
}
pub fn unbind_textures(&mut self, bind: TextureBind) {
self.texture_binds.remove(bind);
}
pub fn load_texture<F>(&mut self, w: u16, h: u16, data: &[u8]) -> TextureView<F>
where F: gfx::format::TextureFormat
{
use gfx::traits::*;
let aa_mode = gfx::texture::AaMode::Single;
let kind = gfx::texture::Kind::D2(w, h, aa_mode);
let (_, view) = self.factory.create_texture_immutable_u8::<F>(kind, &[data]).unwrap();
view
}
pub fn load_texture_from_image<F>(&mut self, path: &str) -> TextureView<F>
where F: gfx::format::TextureFormat
{
let image = image::open(path).unwrap().to_rgba();
let (w, h) = image.dimensions();
self.load_texture::<F>(w as u16, h as u16, &image)
}
pub fn load_white_color(&mut self) -> TextureView<ColorFormat> {
self.load_texture::<ColorFormat>(1, 1, &[255, 255, 255, 255])
}
pub fn load_flat_normal(&mut self) -> TextureView<NormalFormat> {
self.load_texture::<NormalFormat>(1, 1, &[128, 128, 255, 255])
}
}
pub struct Frame<'a> {
pub(crate) graphics: &'a mut Graphics,
pub(crate) should_flush: bool,
}
impl<'a> Frame<'a> {
fn new(graphics: &'a mut Graphics) -> Self {
Frame {
graphics: graphics,
should_flush: false,
}
}
pub fn clear(&mut self, color: NormalizedColor) {
self.graphics.encoder.clear(&self.graphics.output_color, color.to_array());
self.should_flush();
}
pub fn should_flush(&mut self)
|
pub fn flush(&mut self) {
self.graphics.encoder.flush(&mut self.graphics.device);
self.should_flush = false;
}
pub fn ensure_flushed(&mut self) {
if self.should_flush { self.flush(); }
}
pub fn present(mut self, window: &'a Window) {
use gfx::traits::*;
self.ensure_flushed();
window.swap_buffers().unwrap();
self.graphics.device.cleanup();
}
}
|
{
self.should_flush = true;
}
|
identifier_body
|
lib.rs
|
#![feature(pub_restricted, slice_patterns)]
#[macro_use]
extern crate gfx;
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate gfx_window_glutin;
extern crate glutin;
extern crate rusttype;
extern crate conrod;
extern crate cgmath;
extern crate image;
extern crate quickersort;
extern crate rayon;
pub mod combined;
pub mod specialized;
pub mod camera;
pub mod layer;
pub mod color;
pub mod texture;
pub mod sprites;
pub mod lights;
pub mod types;
#[doc(hidden)]
pub mod utils;
pub use camera::Camera;
pub use color::{Color, NormalizedColor};
pub use texture::TextureBind;
use types::*;
use texture::TextureBinds;
use glutin::{WindowBuilder, Window};
pub struct Graphics {
device: Device,
factory: Factory,
encoder: Encoder,
output_color: OutputColor,
output_depth: OutputDepth,
texture_binds: TextureBinds,
}
impl Graphics {
pub fn new(builder: WindowBuilder) -> (Window, Self) {
let (window, device, mut factory, color, depth) =
gfx_window_glutin::init::<ColorFormat, DepthFormat>(
builder.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2))));
let encoder = factory.create_command_buffer().into();
let graphics = Graphics {
device: device,
factory: factory,
encoder: encoder,
output_color: color,
output_depth: depth,
texture_binds: TextureBinds::new(),
};
(window, graphics)
}
pub fn resize(&mut self, window: &Window) {
use gfx_core::format::Formatted;
use gfx_core::memory::Typed;
let (w, h) = window.get_inner_size_pixels().unwrap();
let aa = window.get_pixel_format().multisampling
.unwrap_or(0) as gfx::texture::NumSamples;
let dim = (w as gfx::texture::Size, h as gfx::texture::Size, 1, aa.into());
let (color, depth) = gfx_device_gl::create_main_targets_raw(
dim,
ColorFormat::get_format().0,
DepthFormat::get_format().0);
self.output_color = Typed::new(color);
self.output_depth = Typed::new(depth);
}
pub fn draw<'a>(&'a mut self) -> Frame<'a> {
Frame::new(self)
}
pub fn bind_textures(&mut self, color: TextureView<ColorFormat>,
normal: TextureView<NormalFormat>) -> TextureBind {
self.texture_binds.insert(texture::Bind {
color: color,
normal: normal,
})
}
pub fn update_bound_color(&mut self, bind: TextureBind, color: TextureView<ColorFormat>) {
self.texture_binds.get_mut(bind).color = color;
}
pub fn update_bound_normal(&mut self, bind: TextureBind, normal: TextureView<NormalFormat>) {
self.texture_binds.get_mut(bind).normal = normal;
}
pub fn unbind_textures(&mut self, bind: TextureBind) {
self.texture_binds.remove(bind);
}
pub fn load_texture<F>(&mut self, w: u16, h: u16, data: &[u8]) -> TextureView<F>
where F: gfx::format::TextureFormat
{
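        // Build a single-sampled 2D texture from the raw bytes; only the
        // shader resource view is kept (the texture handle itself is
        // discarded via the `_` binding below).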
use gfx::traits::*;
let aa_mode = gfx::texture::AaMode::Single;
let kind = gfx::texture::Kind::D2(w, h, aa_mode);
let (_, view) = self.factory.create_texture_immutable_u8::<F>(kind, &[data]).unwrap();
view
}
pub fn load_texture_from_image<F>(&mut self, path: &str) -> TextureView<F>
where F: gfx::format::TextureFormat
{
let image = image::open(path).unwrap().to_rgba();
let (w, h) = image.dimensions();
self.load_texture::<F>(w as u16, h as u16, &image)
}
pub fn load_white_color(&mut self) -> TextureView<ColorFormat> {
self.load_texture::<ColorFormat>(1, 1, &[255, 255, 255, 255])
}
pub fn load_flat_normal(&mut self) -> TextureView<NormalFormat> {
self.load_texture::<NormalFormat>(1, 1, &[128, 128, 255, 255])
}
}
pub struct Frame<'a> {
pub(crate) graphics: &'a mut Graphics,
pub(crate) should_flush: bool,
}
impl<'a> Frame<'a> {
fn new(graphics: &'a mut Graphics) -> Self {
Frame {
graphics: graphics,
should_flush: false,
}
}
pub fn clear(&mut self, color: NormalizedColor) {
self.graphics.encoder.clear(&self.graphics.output_color, color.to_array());
self.should_flush();
}
pub fn should_flush(&mut self) {
self.should_flush = true;
}
pub fn flush(&mut self) {
self.graphics.encoder.flush(&mut self.graphics.device);
self.should_flush = false;
}
pub fn ensure_flushed(&mut self) {
if self.should_flush
|
}
pub fn present(mut self, window: &'a Window) {
use gfx::traits::*;
self.ensure_flushed();
window.swap_buffers().unwrap();
self.graphics.device.cleanup();
}
}
|
{ self.flush(); }
|
conditional_block
|
main.rs
|
// Copyright 2019 Yin Guanhao <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use anyhow::{bail, Context};
use std::ffi::OsStr;
use walkdir::WalkDir;
fn main() -> anyhow::Result<()>
|
Ok(())
}
|
{
let dirs = ["src", "benches"];
let mut has_error = false;
for entry in dirs.iter().map(WalkDir::new).flatten() {
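        // map(WalkDir::new) starts one walk per root directory and
        // flatten() chains their entries into a single iterator.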
let entry = entry?;
if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("rs")) {
let file_content = std::fs::read_to_string(entry.path())
.with_context(|| format!("open and read {}", entry.path().display()))?;
if !file_content.starts_with("// Copyright") {
eprintln!(
"Missing copyright claim in file: {}",
entry.path().display()
);
has_error = true;
}
}
}
if has_error {
bail!("Error occurred");
}
|
identifier_body
|
main.rs
|
// Copyright 2019 Yin Guanhao <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use anyhow::{bail, Context};
use std::ffi::OsStr;
use walkdir::WalkDir;
fn
|
() -> anyhow::Result<()> {
let dirs = ["src", "benches"];
let mut has_error = false;
for entry in dirs.iter().map(WalkDir::new).flatten() {
let entry = entry?;
if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("rs")) {
let file_content = std::fs::read_to_string(entry.path())
.with_context(|| format!("open and read {}", entry.path().display()))?;
            if !file_content.starts_with("// Copyright") {
eprintln!(
"Missing copyright claim in file: {}",
entry.path().display()
);
has_error = true;
}
}
}
if has_error {
bail!("Error occurred");
}
Ok(())
}
|
main
|
identifier_name
|
main.rs
|
// Copyright 2019 Yin Guanhao <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use anyhow::{bail, Context};
use std::ffi::OsStr;
use walkdir::WalkDir;
fn main() -> anyhow::Result<()> {
let dirs = ["src", "benches"];
let mut has_error = false;
for entry in dirs.iter().map(WalkDir::new).flatten() {
let entry = entry?;
if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("rs")) {
let file_content = std::fs::read_to_string(entry.path())
.with_context(|| format!("open and read {}", entry.path().display()))?;
|
entry.path().display()
);
has_error = true;
}
}
}
if has_error {
bail!("Error occurred");
}
Ok(())
}
|
if !file_content.starts_with("// Copyright") {
eprintln!(
"Missing copyright claim in file: {}",
|
random_line_split
|
main.rs
|
// Copyright 2019 Yin Guanhao <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
use anyhow::{bail, Context};
use std::ffi::OsStr;
use walkdir::WalkDir;
fn main() -> anyhow::Result<()> {
let dirs = ["src", "benches"];
let mut has_error = false;
for entry in dirs.iter().map(WalkDir::new).flatten() {
let entry = entry?;
if entry.file_type().is_file() && entry.path().extension() == Some(OsStr::new("rs")) {
let file_content = std::fs::read_to_string(entry.path())
.with_context(|| format!("open and read {}", entry.path().display()))?;
            if !file_content.starts_with("// Copyright")
|
}
}
if has_error {
bail!("Error occurred");
}
Ok(())
}
|
{
eprintln!(
"Missing copyright claim in file: {}",
entry.path().display()
);
has_error = true;
}
|
conditional_block
|
framerate.rs
|
//! Framerate control
use libc;
use libc::{c_void, uint32_t, size_t};
use std::mem;
use sdl2::get_error;
mod ll {
/* automatically generated by rust-bindgen */
use libc::*;
#[repr(C)]
pub struct FPSmanager {
pub framecount: uint32_t,
pub rateticks: c_float,
pub baseticks: uint32_t,
pub lastticks: uint32_t,
pub rate: uint32_t,
}
extern "C" {
pub fn SDL_initFramerate(manager: *mut FPSmanager);
pub fn SDL_setFramerate(manager: *mut FPSmanager, rate: uint32_t) -> c_int;
pub fn SDL_getFramerate(manager: *mut FPSmanager) -> c_int;
pub fn SDL_getFramecount(manager: *mut FPSmanager) -> c_int;
pub fn SDL_framerateDelay(manager: *mut FPSmanager) -> uint32_t;
}
}
/// Structure holding the state and timing information of the framerate controller.
pub struct FPSManager {
raw: *mut ll::FPSmanager,
|
unsafe {
let size = mem::size_of::<ll::FPSmanager>() as size_t;
let raw = libc::malloc(size) as *mut ll::FPSmanager;
ll::SDL_initFramerate(raw);
            FPSManager { raw }
}
}
/// Set the framerate in Hz.
pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> {
let ret = unsafe { ll::SDL_setFramerate(self.raw, rate as uint32_t) };
match ret {
0 => Ok(()),
_ => Err(get_error())
}
}
/// Return the current target framerate in Hz.
pub fn get_framerate(&self) -> i32 {
// will not get an error
unsafe { ll::SDL_getFramerate(self.raw) as i32 }
}
/// Return the current framecount.
pub fn get_frame_count(&self) -> i32 {
// will not get an error
unsafe { ll::SDL_getFramecount(self.raw) as i32 }
}
/// Delay execution to maintain a constant framerate and calculate fps.
pub fn delay(&mut self) -> u32 {
unsafe { ll::SDL_framerateDelay(self.raw) as u32 }
}
}
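// The raw FPSmanager was allocated with libc::malloc in new(), so Drop must release it with libc::free.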
impl Drop for FPSManager {
fn drop(&mut self) {
unsafe { libc::free(self.raw as *mut c_void) }
}
}
|
}
impl FPSManager {
/// Create the framerate manager.
pub fn new() -> FPSManager {
|
random_line_split
|
framerate.rs
|
//! Framerate control
use libc;
use libc::{c_void, uint32_t, size_t};
use std::mem;
use sdl2::get_error;
mod ll {
/* automatically generated by rust-bindgen */
use libc::*;
#[repr(C)]
pub struct FPSmanager {
pub framecount: uint32_t,
pub rateticks: c_float,
pub baseticks: uint32_t,
pub lastticks: uint32_t,
pub rate: uint32_t,
}
extern "C" {
pub fn SDL_initFramerate(manager: *mut FPSmanager);
pub fn SDL_setFramerate(manager: *mut FPSmanager, rate: uint32_t) -> c_int;
pub fn SDL_getFramerate(manager: *mut FPSmanager) -> c_int;
pub fn SDL_getFramecount(manager: *mut FPSmanager) -> c_int;
pub fn SDL_framerateDelay(manager: *mut FPSmanager) -> uint32_t;
}
}
/// Structure holding the state and timing information of the framerate controller.
pub struct FPSManager {
raw: *mut ll::FPSmanager,
}
impl FPSManager {
/// Create the framerate manager.
pub fn new() -> FPSManager {
unsafe {
let size = mem::size_of::<ll::FPSmanager>() as size_t;
let raw = libc::malloc(size) as *mut ll::FPSmanager;
ll::SDL_initFramerate(raw);
            FPSManager { raw }
}
}
/// Set the framerate in Hz.
pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> {
let ret = unsafe { ll::SDL_setFramerate(self.raw, rate as uint32_t) };
match ret {
0 => Ok(()),
_ => Err(get_error())
}
}
/// Return the current target framerate in Hz.
pub fn get_framerate(&self) -> i32 {
// will not get an error
unsafe { ll::SDL_getFramerate(self.raw) as i32 }
}
/// Return the current framecount.
pub fn get_frame_count(&self) -> i32 {
// will not get an error
unsafe { ll::SDL_getFramecount(self.raw) as i32 }
}
/// Delay execution to maintain a constant framerate and calculate fps.
pub fn
|
(&mut self) -> u32 {
unsafe { ll::SDL_framerateDelay(self.raw) as u32 }
}
}
impl Drop for FPSManager {
fn drop(&mut self) {
unsafe { libc::free(self.raw as *mut c_void) }
}
}
|
delay
|
identifier_name
|
framerate.rs
|
//! Framerate control
use libc;
use libc::{c_void, uint32_t, size_t};
use std::mem;
use sdl2::get_error;
mod ll {
/* automatically generated by rust-bindgen */
use libc::*;
#[repr(C)]
pub struct FPSmanager {
pub framecount: uint32_t,
pub rateticks: c_float,
pub baseticks: uint32_t,
pub lastticks: uint32_t,
pub rate: uint32_t,
}
extern "C" {
pub fn SDL_initFramerate(manager: *mut FPSmanager);
pub fn SDL_setFramerate(manager: *mut FPSmanager, rate: uint32_t) -> c_int;
pub fn SDL_getFramerate(manager: *mut FPSmanager) -> c_int;
pub fn SDL_getFramecount(manager: *mut FPSmanager) -> c_int;
pub fn SDL_framerateDelay(manager: *mut FPSmanager) -> uint32_t;
}
}
/// Structure holding the state and timing information of the framerate controller.
pub struct FPSManager {
raw: *mut ll::FPSmanager,
}
impl FPSManager {
/// Create the framerate manager.
pub fn new() -> FPSManager {
unsafe {
let size = mem::size_of::<ll::FPSmanager>() as size_t;
let raw = libc::malloc(size) as *mut ll::FPSmanager;
ll::SDL_initFramerate(raw);
            FPSManager { raw }
}
}
/// Set the framerate in Hz.
pub fn set_framerate(&mut self, rate: u32) -> Result<(), String> {
let ret = unsafe { ll::SDL_setFramerate(self.raw, rate as uint32_t) };
match ret {
0 => Ok(()),
_ => Err(get_error())
}
}
/// Return the current target framerate in Hz.
pub fn get_framerate(&self) -> i32 {
// will not get an error
unsafe { ll::SDL_getFramerate(self.raw) as i32 }
}
/// Return the current framecount.
pub fn get_frame_count(&self) -> i32
|
/// Delay execution to maintain a constant framerate and calculate fps.
pub fn delay(&mut self) -> u32 {
unsafe { ll::SDL_framerateDelay(self.raw) as u32 }
}
}
impl Drop for FPSManager {
fn drop(&mut self) {
unsafe { libc::free(self.raw as *mut c_void) }
}
}
|
{
// will not get an error
unsafe { ll::SDL_getFramecount(self.raw) as i32 }
}
|
identifier_body
|
main.rs
|
::BlobRepo;
use bookmarks::{BookmarkName, Freshness};
use cached_config::ConfigStore;
use clap_old::ArgMatches;
use cmdlib::{
args::{self, MononokeClapApp, MononokeMatches},
helpers, monitoring,
};
use cmdlib_x_repo::create_commit_syncer_from_matches;
use context::CoreContext;
use cross_repo_sync::{
types::{Source, Target},
CommitSyncer,
};
use derived_data_utils::derive_data_for_csids;
use fbinit::FacebookInit;
use futures::{
compat::Future01CompatExt,
future::{self, try_join},
stream::{self, TryStreamExt},
StreamExt,
};
use futures_stats::TimedFutureExt;
use live_commit_sync_config::{CfgrLiveCommitSyncConfig, LiveCommitSyncConfig};
use mononoke_api_types::InnerRepo;
use mononoke_hg_sync_job_helper_lib::wait_for_latest_log_id_to_be_synced;
use mononoke_types::{ChangesetId, RepositoryId};
use mutable_counters::{MutableCounters, SqlMutableCounters};
use regex::Regex;
use scuba_ext::MononokeScubaSampleBuilder;
use skiplist::SkiplistIndex;
use slog::{debug, error, info, warn};
use std::{collections::HashSet, sync::Arc, time::Duration};
use synced_commit_mapping::SyncedCommitMapping;
mod cli;
mod reporting;
mod setup;
mod sync;
use crate::cli::{
create_app, ARG_BACKSYNC_BACKPRESSURE_REPOS_IDS, ARG_BOOKMARK_REGEX, ARG_CATCH_UP_ONCE,
ARG_DERIVED_DATA_TYPES, ARG_HG_SYNC_BACKPRESSURE, ARG_ONCE, ARG_TAIL, ARG_TARGET_BOOKMARK,
};
use crate::reporting::{add_common_fields, log_bookmark_update_result, log_noop_iteration};
use crate::setup::{get_scuba_sample, get_sleep_secs, get_starting_commit};
use crate::sync::{sync_commit_and_ancestors, sync_single_bookmark_update_log};
fn print_error(ctx: CoreContext, error: &Error) {
error!(ctx.logger(), "{}", error);
for cause in error.chain().skip(1) {
error!(ctx.logger(), "caused by: {}", cause);
}
}
async fn run_in_single_commit_mode<M: SyncedCommitMapping + Clone + 'static>(
ctx: &CoreContext,
bcs: ChangesetId,
commit_syncer: CommitSyncer<M>,
scuba_sample: MononokeScubaSampleBuilder,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
maybe_bookmark: Option<BookmarkName>,
common_bookmarks: HashSet<BookmarkName>,
) -> Result<(), Error> {
info!(
ctx.logger(),
"Checking if {} is already synced {}->{}",
bcs,
commit_syncer.repos.get_source_repo().get_repoid(),
commit_syncer.repos.get_target_repo().get_repoid()
);
if commit_syncer
.commit_sync_outcome_exists(ctx, Source(bcs))
.await?
{
info!(ctx.logger(), "{} is already synced", bcs);
return Ok(());
}
let res = sync_commit_and_ancestors(
ctx,
&commit_syncer,
None, // from_cs_id,
bcs,
maybe_bookmark,
&source_skiplist_index,
&target_skiplist_index,
&common_bookmarks,
scuba_sample,
)
.await;
if res.is_ok() {
info!(ctx.logger(), "successful sync");
}
res.map(|_| ())
}
enum TailingArgs<M> {
CatchUpOnce(CommitSyncer<M>),
LoopForever(CommitSyncer<M>, ConfigStore),
}
async fn run_in_tailing_mode<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
mutable_counters: C,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
common_pushrebase_bookmarks: HashSet<BookmarkName>,
base_scuba_sample: MononokeScubaSampleBuilder,
backpressure_params: BackpressureParams,
derived_data_types: Vec<String>,
tailing_args: TailingArgs<M>,
sleep_secs: u64,
maybe_bookmark_regex: Option<Regex>,
) -> Result<(), Error> {
match tailing_args {
TailingArgs::CatchUpOnce(commit_syncer) => {
let scuba_sample = MononokeScubaSampleBuilder::with_discard();
tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample,
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
}
TailingArgs::LoopForever(commit_syncer, config_store) => {
let live_commit_sync_config =
Arc::new(CfgrLiveCommitSyncConfig::new(ctx.logger(), &config_store)?);
let source_repo_id = commit_syncer.get_source_repo().get_repoid();
loop {
let scuba_sample = base_scuba_sample.clone();
// We only care about public pushes because draft pushes are not in the bookmark
// update log at all.
let enabled =
live_commit_sync_config.push_redirector_enabled_for_public(source_repo_id);
                // Push redirection is enabled - we need to disable forward sync in that case
if enabled {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
continue;
}
let synced_something = tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample.clone(),
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
                if !synced_something {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
}
}
}
}
Ok(())
}
async fn tail<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
commit_syncer: &CommitSyncer<M>,
mutable_counters: &C,
mut scuba_sample: MononokeScubaSampleBuilder,
common_pushrebase_bookmarks: &HashSet<BookmarkName>,
source_skiplist_index: &Source<Arc<SkiplistIndex>>,
target_skiplist_index: &Target<Arc<SkiplistIndex>>,
backpressure_params: &BackpressureParams,
derived_data_types: &[String],
sleep_secs: u64,
maybe_bookmark_regex: &Option<Regex>,
) -> Result<bool, Error> {
let source_repo = commit_syncer.get_source_repo();
let target_repo_id = commit_syncer.get_target_repo_id();
let bookmark_update_log = source_repo.bookmark_update_log();
let counter = format_counter(&commit_syncer);
let maybe_start_id = mutable_counters
.get_counter(ctx.clone(), target_repo_id, &counter)
.compat()
.await?;
let start_id = maybe_start_id.ok_or(format_err!("counter not found"))?;
let limit = 10;
let log_entries = bookmark_update_log
.read_next_bookmark_log_entries(ctx.clone(), start_id as u64, limit, Freshness::MaybeStale)
.try_collect::<Vec<_>>()
.await?;
let remaining_entries = commit_syncer
.get_source_repo()
.count_further_bookmark_log_entries(ctx.clone(), start_id as u64, None)
.await?;
if log_entries.is_empty() {
log_noop_iteration(scuba_sample.clone());
Ok(false)
} else {
scuba_sample.add("queue_size", remaining_entries);
info!(ctx.logger(), "queue size is {}", remaining_entries);
for entry in log_entries {
let entry_id = entry.id;
scuba_sample.add("entry_id", entry.id);
let mut skip = false;
if let Some(regex) = maybe_bookmark_regex
|
            if !skip {
let (stats, res) = sync_single_bookmark_update_log(
&ctx,
&commit_syncer,
entry,
source_skiplist_index,
target_skiplist_index,
&common_pushrebase_bookmarks,
scuba_sample.clone(),
)
.timed()
.await;
log_bookmark_update_result(&ctx, entry_id, scuba_sample.clone(), &res, stats);
let maybe_synced_css = res?;
if let SyncResult::Synced(synced_css) = maybe_synced_css {
derive_data_for_csids(
&ctx,
&commit_syncer.get_target_repo(),
synced_css,
derived_data_types,
)?
.await?;
maybe_apply_backpressure(
ctx,
mutable_counters,
backpressure_params,
commit_syncer.get_target_repo(),
scuba_sample.clone(),
sleep_secs,
)
.await?;
}
} else {
info!(
ctx.logger(),
"skipping log entry #{} for {}", entry.id, entry.bookmark_name
);
let mut scuba_sample = scuba_sample.clone();
scuba_sample.add("source_bookmark_name", format!("{}", entry.bookmark_name));
scuba_sample.add("skipped", true);
scuba_sample.log();
}
// Note that updating the counter might fail after successful sync of the commits.
// This is expected - next run will try to update the counter again without
// re-syncing the commits.
mutable_counters
.set_counter(ctx.clone(), target_repo_id, &counter, entry_id, None)
.compat()
.await?;
}
Ok(true)
}
}
async fn maybe_apply_backpressure<C>(
ctx: &CoreContext,
mutable_counters: &C,
backpressure_params: &BackpressureParams,
target_repo: &BlobRepo,
scuba_sample: MononokeScubaSampleBuilder,
sleep_secs: u64,
) -> Result<(), Error>
where
    C: MutableCounters + Clone + Sync + 'static,
{
let target_repo_id = target_repo.get_repoid();
let limit = 10;
loop {
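        // Fan out counter lookups across every backsync repo (up to 100 in flight) and keep the largest backlog.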
let max_further_entries = stream::iter(&backpressure_params.backsync_repos)
.map(Ok)
.map_ok(|repo| {
async move {
let repo_id = repo.get_repoid();
let backsyncer_counter = format_backsyncer_counter(&target_repo_id);
let maybe_counter = mutable_counters
.get_counter(ctx.clone(), repo_id, &backsyncer_counter)
.compat()
.await?;
match maybe_counter {
Some(counter) => {
let bookmark_update_log = repo.bookmark_update_log();
debug!(ctx.logger(), "repo {}, counter {}", repo_id, counter);
bookmark_update_log
.count_further_bookmark_log_entries(
ctx.clone(),
counter as u64,
None, // exclude_reason
)
.await
}
None => {
warn!(
ctx.logger(),
"backsyncer counter not found for repo {}!", repo_id,
);
Ok(0)
}
}
}
})
.try_buffer_unordered(100)
.try_fold(0, |acc, x| future::ready(Ok(::std::cmp::max(acc, x))))
.await?;
if max_further_entries > limit {
reporting::log_backpressure(ctx, max_further_entries, scuba_sample.clone());
tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
} else {
break;
}
}
if backpressure_params.wait_for_target_repo_hg_sync {
wait_for_latest_log_id_to_be_synced(ctx, target_repo, mutable_counters, sleep_secs).await?;
}
Ok(())
}
fn format_counter<M: SyncedCommitMapping + Clone + 'static>(
commit_syncer: &CommitSyncer<M>,
) -> String {
let source_repo_id = commit_syncer.get_source_repo_id();
format!("xreposync_from_{}", source_repo_id)
}
async fn run<'a>(
fb: FacebookInit,
ctx: CoreContext,
matches: &'a MononokeMatches<'a>,
) -> Result<(), Error> {
let config_store = matches.config_store();
let mut scuba_sample = get_scuba_sample(ctx.clone(), &matches);
let counters = args::open_source_sql::<SqlMutableCounters>(fb, config_store, &matches)?;
let source_repo_id = args::get_source_repo_id(config_store, &matches)?;
let target_repo_id = args::get_target_repo_id(config_store, &matches)?;
let logger = ctx.logger();
let source_repo = args::open_repo_with_repo_id(fb, &logger, source_repo_id, &matches);
let target_repo = args::open_repo_with_repo_id(fb, &logger, target_repo_id, &matches);
let (source_repo, target_repo): (InnerRepo, InnerRepo) =
try_join(source_repo, target_repo).await?;
let commit_syncer = create_commit_syncer_from_matches(&ctx, &matches).await?;
let live_commit_sync_config = Arc::new(CfgrLiveCommitSyncConfig::new(&logger, &config_store)?);
let common_commit_sync_config =
live_commit_sync_config.get_common_config(source_repo.blob_repo.get_repoid())?;
let common_bookmarks: HashSet<_> = common_commit_sync_config
.common_pushrebase_bookmarks
.clone()
.into_iter()
.collect();
let source_skiplist_index = Source(source_repo.skiplist_index.clone());
let target_skiplist_index = Target(target_repo.skiplist_index.clone());
match matches.subcommand() {
(ARG_ONCE, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let maybe_target_bookmark = sub_m
.value_of(ARG_TARGET_BOOKMARK)
.map(BookmarkName::new)
.transpose()?;
let bcs = get_starting_commit(&ctx, &sub_m, source_repo.blob_repo.clone()).await?;
run_in_single_commit_mode(
&ctx,
bcs,
commit_syncer,
scuba_sample,
source_skiplist_index,
target_skiplist_index,
maybe_target_bookmark,
common_bookmarks,
)
.await
}
(ARG_TAIL, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let sleep_secs = get_sleep_secs(sub_m)?;
let tailing_args = if sub_m.is_present(ARG_CATCH_UP_ONCE) {
TailingArgs::CatchUpOnce(commit_syncer)
} else {
let config_store = matches.config_store();
TailingArgs::LoopForever(commit_syncer, config_store.clone())
};
let backpressure_params = BackpressureParams::new(&ctx, matches, sub_m).await?;
let derived_data_types: Vec<String> = match sub_m.values_of(ARG_DERIVED_DATA_TYPES) {
Some(derived_data_types) => derived_data_types
.into_iter()
.map(String::from)
.collect::<Vec<_>>(),
None => vec![],
};
let maybe_bookmark_regex = match sub_m.value_of(ARG_BOOKMARK_REGEX) {
Some(regex) => Some(Regex::new(regex)?),
None => None,
};
run_in_tailing_mode(
&ctx,
counters,
source_skiplist_index,
target_skiplist_index,
common_bookmarks,
scuba_sample,
backpressure_params,
derived_data_types,
tailing_args,
sleep_secs,
maybe_bookmark_regex,
)
.await
}
(incorrect, _) => Err(format_err!(
"Incorrect mode
|
{
if !regex.is_match(entry.bookmark_name.as_str()) {
skip = true;
}
}
|
conditional_block
|
main.rs
|
epo::BlobRepo;
use bookmarks::{BookmarkName, Freshness};
use cached_config::ConfigStore;
use clap_old::ArgMatches;
use cmdlib::{
args::{self, MononokeClapApp, MononokeMatches},
helpers, monitoring,
};
use cmdlib_x_repo::create_commit_syncer_from_matches;
use context::CoreContext;
use cross_repo_sync::{
types::{Source, Target},
CommitSyncer,
};
use derived_data_utils::derive_data_for_csids;
use fbinit::FacebookInit;
use futures::{
compat::Future01CompatExt,
future::{self, try_join},
stream::{self, TryStreamExt},
StreamExt,
};
use futures_stats::TimedFutureExt;
use live_commit_sync_config::{CfgrLiveCommitSyncConfig, LiveCommitSyncConfig};
use mononoke_api_types::InnerRepo;
use mononoke_hg_sync_job_helper_lib::wait_for_latest_log_id_to_be_synced;
use mononoke_types::{ChangesetId, RepositoryId};
use mutable_counters::{MutableCounters, SqlMutableCounters};
use regex::Regex;
use scuba_ext::MononokeScubaSampleBuilder;
use skiplist::SkiplistIndex;
use slog::{debug, error, info, warn};
use std::{collections::HashSet, sync::Arc, time::Duration};
use synced_commit_mapping::SyncedCommitMapping;
mod cli;
mod reporting;
mod setup;
mod sync;
use crate::cli::{
create_app, ARG_BACKSYNC_BACKPRESSURE_REPOS_IDS, ARG_BOOKMARK_REGEX, ARG_CATCH_UP_ONCE,
ARG_DERIVED_DATA_TYPES, ARG_HG_SYNC_BACKPRESSURE, ARG_ONCE, ARG_TAIL, ARG_TARGET_BOOKMARK,
};
use crate::reporting::{add_common_fields, log_bookmark_update_result, log_noop_iteration};
use crate::setup::{get_scuba_sample, get_sleep_secs, get_starting_commit};
use crate::sync::{sync_commit_and_ancestors, sync_single_bookmark_update_log};
fn print_error(ctx: CoreContext, error: &Error) {
error!(ctx.logger(), "{}", error);
for cause in error.chain().skip(1) {
error!(ctx.logger(), "caused by: {}", cause);
}
}
async fn run_in_single_commit_mode<M: SyncedCommitMapping + Clone + 'static>(
ctx: &CoreContext,
bcs: ChangesetId,
commit_syncer: CommitSyncer<M>,
scuba_sample: MononokeScubaSampleBuilder,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
maybe_bookmark: Option<BookmarkName>,
common_bookmarks: HashSet<BookmarkName>,
) -> Result<(), Error> {
info!(
ctx.logger(),
"Checking if {} is already synced {}->{}",
bcs,
commit_syncer.repos.get_source_repo().get_repoid(),
commit_syncer.repos.get_target_repo().get_repoid()
);
if commit_syncer
.commit_sync_outcome_exists(ctx, Source(bcs))
.await?
{
info!(ctx.logger(), "{} is already synced", bcs);
return Ok(());
}
let res = sync_commit_and_ancestors(
ctx,
&commit_syncer,
None, // from_cs_id,
bcs,
maybe_bookmark,
&source_skiplist_index,
&target_skiplist_index,
&common_bookmarks,
scuba_sample,
)
.await;
if res.is_ok() {
info!(ctx.logger(), "successful sync");
}
res.map(|_| ())
}
enum TailingArgs<M> {
CatchUpOnce(CommitSyncer<M>),
LoopForever(CommitSyncer<M>, ConfigStore),
}
async fn run_in_tailing_mode<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
mutable_counters: C,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
common_pushrebase_bookmarks: HashSet<BookmarkName>,
base_scuba_sample: MononokeScubaSampleBuilder,
backpressure_params: BackpressureParams,
derived_data_types: Vec<String>,
tailing_args: TailingArgs<M>,
sleep_secs: u64,
maybe_bookmark_regex: Option<Regex>,
) -> Result<(), Error> {
match tailing_args {
TailingArgs::CatchUpOnce(commit_syncer) => {
let scuba_sample = MononokeScubaSampleBuilder::with_discard();
tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample,
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
}
TailingArgs::LoopForever(commit_syncer, config_store) => {
let live_commit_sync_config =
Arc::new(CfgrLiveCommitSyncConfig::new(ctx.logger(), &config_store)?);
let source_repo_id = commit_syncer.get_source_repo().get_repoid();
loop {
let scuba_sample = base_scuba_sample.clone();
// We only care about public pushes because draft pushes are not in the bookmark
// update log at all.
let enabled =
live_commit_sync_config.push_redirector_enabled_for_public(source_repo_id);
                // Push redirection is enabled - we need to disable forward sync in that case
if enabled {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
continue;
}
let synced_something = tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample.clone(),
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
                if !synced_something {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
}
}
}
}
Ok(())
}
async fn tail<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
commit_syncer: &CommitSyncer<M>,
mutable_counters: &C,
mut scuba_sample: MononokeScubaSampleBuilder,
common_pushrebase_bookmarks: &HashSet<BookmarkName>,
source_skiplist_index: &Source<Arc<SkiplistIndex>>,
target_skiplist_index: &Target<Arc<SkiplistIndex>>,
backpressure_params: &BackpressureParams,
derived_data_types: &[String],
sleep_secs: u64,
maybe_bookmark_regex: &Option<Regex>,
) -> Result<bool, Error> {
let source_repo = commit_syncer.get_source_repo();
let target_repo_id = commit_syncer.get_target_repo_id();
let bookmark_update_log = source_repo.bookmark_update_log();
let counter = format_counter(&commit_syncer);
let maybe_start_id = mutable_counters
.get_counter(ctx.clone(), target_repo_id, &counter)
.compat()
.await?;
let start_id = maybe_start_id.ok_or(format_err!("counter not found"))?;
let limit = 10;
let log_entries = bookmark_update_log
.read_next_bookmark_log_entries(ctx.clone(), start_id as u64, limit, Freshness::MaybeStale)
.try_collect::<Vec<_>>()
.await?;
let remaining_entries = commit_syncer
.get_source_repo()
.count_further_bookmark_log_entries(ctx.clone(), start_id as u64, None)
.await?;
if log_entries.is_empty() {
log_noop_iteration(scuba_sample.clone());
Ok(false)
} else {
scuba_sample.add("queue_size", remaining_entries);
info!(ctx.logger(), "queue size is {}", remaining_entries);
for entry in log_entries {
let entry_id = entry.id;
scuba_sample.add("entry_id", entry.id);
let mut skip = false;
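            // Apply the optional bookmark filter: entries whose bookmark does not match are logged and skipped below.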
if let Some(regex) = maybe_bookmark_regex {
                if !regex.is_match(entry.bookmark_name.as_str()) {
skip = true;
}
}
            if !skip {
let (stats, res) = sync_single_bookmark_update_log(
&ctx,
&commit_syncer,
entry,
source_skiplist_index,
target_skiplist_index,
&common_pushrebase_bookmarks,
scuba_sample.clone(),
)
.timed()
.await;
log_bookmark_update_result(&ctx, entry_id, scuba_sample.clone(), &res, stats);
let maybe_synced_css = res?;
if let SyncResult::Synced(synced_css) = maybe_synced_css {
derive_data_for_csids(
&ctx,
&commit_syncer.get_target_repo(),
synced_css,
derived_data_types,
)?
.await?;
maybe_apply_backpressure(
ctx,
mutable_counters,
backpressure_params,
commit_syncer.get_target_repo(),
scuba_sample.clone(),
sleep_secs,
)
.await?;
}
} else {
info!(
ctx.logger(),
"skipping log entry #{} for {}", entry.id, entry.bookmark_name
);
let mut scuba_sample = scuba_sample.clone();
scuba_sample.add("source_bookmark_name", format!("{}", entry.bookmark_name));
scuba_sample.add("skipped", true);
scuba_sample.log();
}
// Note that updating the counter might fail after successful sync of the commits.
// This is expected - next run will try to update the counter again without
// re-syncing the commits.
mutable_counters
.set_counter(ctx.clone(), target_repo_id, &counter, entry_id, None)
.compat()
.await?;
}
Ok(true)
}
}
async fn maybe_apply_backpressure<C>(
ctx: &CoreContext,
mutable_counters: &C,
backpressure_params: &BackpressureParams,
target_repo: &BlobRepo,
scuba_sample: MononokeScubaSampleBuilder,
sleep_secs: u64,
) -> Result<(), Error>
where
    C: MutableCounters + Clone + Sync + 'static,
{
let target_repo_id = target_repo.get_repoid();
let limit = 10;
loop {
let max_further_entries = stream::iter(&backpressure_params.backsync_repos)
.map(Ok)
.map_ok(|repo| {
async move {
let repo_id = repo.get_repoid();
let backsyncer_counter = format_backsyncer_counter(&target_repo_id);
let maybe_counter = mutable_counters
.get_counter(ctx.clone(), repo_id, &backsyncer_counter)
.compat()
.await?;
match maybe_counter {
Some(counter) => {
let bookmark_update_log = repo.bookmark_update_log();
debug!(ctx.logger(), "repo {}, counter {}", repo_id, counter);
bookmark_update_log
.count_further_bookmark_log_entries(
ctx.clone(),
counter as u64,
None, // exclude_reason
)
.await
}
None => {
warn!(
ctx.logger(),
"backsyncer counter not found for repo {}!", repo_id,
);
Ok(0)
}
}
}
})
.try_buffer_unordered(100)
.try_fold(0, |acc, x| future::ready(Ok(::std::cmp::max(acc, x))))
.await?;
if max_further_entries > limit {
reporting::log_backpressure(ctx, max_further_entries, scuba_sample.clone());
tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
} else {
break;
}
}
if backpressure_params.wait_for_target_repo_hg_sync {
wait_for_latest_log_id_to_be_synced(ctx, target_repo, mutable_counters, sleep_secs).await?;
}
Ok(())
}
fn format_counter<M: SyncedCommitMapping + Clone + 'static>(
commit_syncer: &CommitSyncer<M>,
) -> String {
let source_repo_id = commit_syncer.get_source_repo_id();
format!("xreposync_from_{}", source_repo_id)
}
async fn run<'a>(
fb: FacebookInit,
ctx: CoreContext,
matches: &'a MononokeMatches<'a>,
) -> Result<(), Error> {
let config_store = matches.config_store();
let mut scuba_sample = get_scuba_sample(ctx.clone(), &matches);
let counters = args::open_source_sql::<SqlMutableCounters>(fb, config_store, &matches)?;
let source_repo_id = args::get_source_repo_id(config_store, &matches)?;
let target_repo_id = args::get_target_repo_id(config_store, &matches)?;
let logger = ctx.logger();
let source_repo = args::open_repo_with_repo_id(fb, &logger, source_repo_id, &matches);
let target_repo = args::open_repo_with_repo_id(fb, &logger, target_repo_id, &matches);
let (source_repo, target_repo): (InnerRepo, InnerRepo) =
try_join(source_repo, target_repo).await?;
let commit_syncer = create_commit_syncer_from_matches(&ctx, &matches).await?;
let live_commit_sync_config = Arc::new(CfgrLiveCommitSyncConfig::new(&logger, &config_store)?);
let common_commit_sync_config =
live_commit_sync_config.get_common_config(source_repo.blob_repo.get_repoid())?;
let common_bookmarks: HashSet<_> = common_commit_sync_config
.common_pushrebase_bookmarks
.clone()
.into_iter()
.collect();
let source_skiplist_index = Source(source_repo.skiplist_index.clone());
let target_skiplist_index = Target(target_repo.skiplist_index.clone());
match matches.subcommand() {
(ARG_ONCE, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let maybe_target_bookmark = sub_m
.value_of(ARG_TARGET_BOOKMARK)
.map(BookmarkName::new)
.transpose()?;
let bcs = get_starting_commit(&ctx, &sub_m, source_repo.blob_repo.clone()).await?;
run_in_single_commit_mode(
&ctx,
bcs,
|
commit_syncer,
scuba_sample,
source_skiplist_index,
target_skiplist_index,
maybe_target_bookmark,
common_bookmarks,
)
.await
}
(ARG_TAIL, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let sleep_secs = get_sleep_secs(sub_m)?;
let tailing_args = if sub_m.is_present(ARG_CATCH_UP_ONCE) {
TailingArgs::CatchUpOnce(commit_syncer)
} else {
let config_store = matches.config_store();
TailingArgs::LoopForever(commit_syncer, config_store.clone())
};
let backpressure_params = BackpressureParams::new(&ctx, matches, sub_m).await?;
let derived_data_types: Vec<String> = match sub_m.values_of(ARG_DERIVED_DATA_TYPES) {
Some(derived_data_types) => derived_data_types
.into_iter()
.map(String::from)
.collect::<Vec<_>>(),
None => vec![],
};
let maybe_bookmark_regex = match sub_m.value_of(ARG_BOOKMARK_REGEX) {
Some(regex) => Some(Regex::new(regex)?),
None => None,
};
run_in_tailing_mode(
&ctx,
counters,
source_skiplist_index,
target_skiplist_index,
common_bookmarks,
scuba_sample,
backpressure_params,
derived_data_types,
tailing_args,
sleep_secs,
maybe_bookmark_regex,
)
.await
}
(incorrect, _) => Err(format_err!(
"Incorrect mode of
|
random_line_split
|
|
main.rs
|
::BlobRepo;
use bookmarks::{BookmarkName, Freshness};
use cached_config::ConfigStore;
use clap_old::ArgMatches;
use cmdlib::{
args::{self, MononokeClapApp, MononokeMatches},
helpers, monitoring,
};
use cmdlib_x_repo::create_commit_syncer_from_matches;
use context::CoreContext;
use cross_repo_sync::{
types::{Source, Target},
CommitSyncer,
};
use derived_data_utils::derive_data_for_csids;
use fbinit::FacebookInit;
use futures::{
compat::Future01CompatExt,
future::{self, try_join},
stream::{self, TryStreamExt},
StreamExt,
};
use futures_stats::TimedFutureExt;
use live_commit_sync_config::{CfgrLiveCommitSyncConfig, LiveCommitSyncConfig};
use mononoke_api_types::InnerRepo;
use mononoke_hg_sync_job_helper_lib::wait_for_latest_log_id_to_be_synced;
use mononoke_types::{ChangesetId, RepositoryId};
use mutable_counters::{MutableCounters, SqlMutableCounters};
use regex::Regex;
use scuba_ext::MononokeScubaSampleBuilder;
use skiplist::SkiplistIndex;
use slog::{debug, error, info, warn};
use std::{collections::HashSet, sync::Arc, time::Duration};
use synced_commit_mapping::SyncedCommitMapping;
mod cli;
mod reporting;
mod setup;
mod sync;
use crate::cli::{
create_app, ARG_BACKSYNC_BACKPRESSURE_REPOS_IDS, ARG_BOOKMARK_REGEX, ARG_CATCH_UP_ONCE,
ARG_DERIVED_DATA_TYPES, ARG_HG_SYNC_BACKPRESSURE, ARG_ONCE, ARG_TAIL, ARG_TARGET_BOOKMARK,
};
use crate::reporting::{add_common_fields, log_bookmark_update_result, log_noop_iteration};
use crate::setup::{get_scuba_sample, get_sleep_secs, get_starting_commit};
use crate::sync::{sync_commit_and_ancestors, sync_single_bookmark_update_log};
fn print_error(ctx: CoreContext, error: &Error) {
error!(ctx.logger(), "{}", error);
for cause in error.chain().skip(1) {
error!(ctx.logger(), "caused by: {}", cause);
}
}
async fn run_in_single_commit_mode<M: SyncedCommitMapping + Clone + 'static>(
ctx: &CoreContext,
bcs: ChangesetId,
commit_syncer: CommitSyncer<M>,
scuba_sample: MononokeScubaSampleBuilder,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
maybe_bookmark: Option<BookmarkName>,
common_bookmarks: HashSet<BookmarkName>,
) -> Result<(), Error> {
info!(
ctx.logger(),
"Checking if {} is already synced {}->{}",
bcs,
commit_syncer.repos.get_source_repo().get_repoid(),
commit_syncer.repos.get_target_repo().get_repoid()
);
if commit_syncer
.commit_sync_outcome_exists(ctx, Source(bcs))
.await?
{
info!(ctx.logger(), "{} is already synced", bcs);
return Ok(());
}
let res = sync_commit_and_ancestors(
ctx,
&commit_syncer,
None, // from_cs_id,
bcs,
maybe_bookmark,
&source_skiplist_index,
&target_skiplist_index,
&common_bookmarks,
scuba_sample,
)
.await;
if res.is_ok() {
info!(ctx.logger(), "successful sync");
}
res.map(|_| ())
}
enum TailingArgs<M> {
CatchUpOnce(CommitSyncer<M>),
LoopForever(CommitSyncer<M>, ConfigStore),
}
async fn run_in_tailing_mode<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
mutable_counters: C,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
common_pushrebase_bookmarks: HashSet<BookmarkName>,
base_scuba_sample: MononokeScubaSampleBuilder,
backpressure_params: BackpressureParams,
derived_data_types: Vec<String>,
tailing_args: TailingArgs<M>,
sleep_secs: u64,
maybe_bookmark_regex: Option<Regex>,
) -> Result<(), Error> {
match tailing_args {
TailingArgs::CatchUpOnce(commit_syncer) => {
let scuba_sample = MononokeScubaSampleBuilder::with_discard();
tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample,
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
}
TailingArgs::LoopForever(commit_syncer, config_store) => {
let live_commit_sync_config =
Arc::new(CfgrLiveCommitSyncConfig::new(ctx.logger(), &config_store)?);
let source_repo_id = commit_syncer.get_source_repo().get_repoid();
loop {
let scuba_sample = base_scuba_sample.clone();
// We only care about public pushes because draft pushes are not in the bookmark
// update log at all.
let enabled =
live_commit_sync_config.push_redirector_enabled_for_public(source_repo_id);
                // Push redirection is enabled - we need to disable forward sync in that case
if enabled {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
continue;
}
let synced_something = tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample.clone(),
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
                if !synced_something {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
}
}
}
}
Ok(())
}
async fn
|
<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
commit_syncer: &CommitSyncer<M>,
mutable_counters: &C,
mut scuba_sample: MononokeScubaSampleBuilder,
common_pushrebase_bookmarks: &HashSet<BookmarkName>,
source_skiplist_index: &Source<Arc<SkiplistIndex>>,
target_skiplist_index: &Target<Arc<SkiplistIndex>>,
backpressure_params: &BackpressureParams,
derived_data_types: &[String],
sleep_secs: u64,
maybe_bookmark_regex: &Option<Regex>,
) -> Result<bool, Error> {
let source_repo = commit_syncer.get_source_repo();
let target_repo_id = commit_syncer.get_target_repo_id();
let bookmark_update_log = source_repo.bookmark_update_log();
let counter = format_counter(&commit_syncer);
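    // The mutable counter records the id of the last bookmark-update-log entry that was successfully synced.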
let maybe_start_id = mutable_counters
.get_counter(ctx.clone(), target_repo_id, &counter)
.compat()
.await?;
let start_id = maybe_start_id.ok_or(format_err!("counter not found"))?;
let limit = 10;
let log_entries = bookmark_update_log
.read_next_bookmark_log_entries(ctx.clone(), start_id as u64, limit, Freshness::MaybeStale)
.try_collect::<Vec<_>>()
.await?;
let remaining_entries = commit_syncer
.get_source_repo()
.count_further_bookmark_log_entries(ctx.clone(), start_id as u64, None)
.await?;
if log_entries.is_empty() {
log_noop_iteration(scuba_sample.clone());
Ok(false)
} else {
scuba_sample.add("queue_size", remaining_entries);
info!(ctx.logger(), "queue size is {}", remaining_entries);
for entry in log_entries {
let entry_id = entry.id;
scuba_sample.add("entry_id", entry.id);
let mut skip = false;
if let Some(regex) = maybe_bookmark_regex {
                if !regex.is_match(entry.bookmark_name.as_str()) {
skip = true;
}
}
            if !skip {
let (stats, res) = sync_single_bookmark_update_log(
&ctx,
&commit_syncer,
entry,
source_skiplist_index,
target_skiplist_index,
&common_pushrebase_bookmarks,
scuba_sample.clone(),
)
.timed()
.await;
log_bookmark_update_result(&ctx, entry_id, scuba_sample.clone(), &res, stats);
let maybe_synced_css = res?;
if let SyncResult::Synced(synced_css) = maybe_synced_css {
derive_data_for_csids(
&ctx,
&commit_syncer.get_target_repo(),
synced_css,
derived_data_types,
)?
.await?;
maybe_apply_backpressure(
ctx,
mutable_counters,
backpressure_params,
commit_syncer.get_target_repo(),
scuba_sample.clone(),
sleep_secs,
)
.await?;
}
} else {
info!(
ctx.logger(),
"skipping log entry #{} for {}", entry.id, entry.bookmark_name
);
let mut scuba_sample = scuba_sample.clone();
scuba_sample.add("source_bookmark_name", format!("{}", entry.bookmark_name));
scuba_sample.add("skipped", true);
scuba_sample.log();
}
// Note that updating the counter might fail after successful sync of the commits.
// This is expected - next run will try to update the counter again without
// re-syncing the commits.
mutable_counters
.set_counter(ctx.clone(), target_repo_id, &counter, entry_id, None)
.compat()
.await?;
}
Ok(true)
}
}
async fn maybe_apply_backpressure<C>(
ctx: &CoreContext,
mutable_counters: &C,
backpressure_params: &BackpressureParams,
target_repo: &BlobRepo,
scuba_sample: MononokeScubaSampleBuilder,
sleep_secs: u64,
) -> Result<(), Error>
where
    C: MutableCounters + Clone + Sync + 'static,
{
let target_repo_id = target_repo.get_repoid();
let limit = 10;
loop {
let max_further_entries = stream::iter(&backpressure_params.backsync_repos)
.map(Ok)
.map_ok(|repo| {
async move {
let repo_id = repo.get_repoid();
let backsyncer_counter = format_backsyncer_counter(&target_repo_id);
let maybe_counter = mutable_counters
.get_counter(ctx.clone(), repo_id, &backsyncer_counter)
.compat()
.await?;
match maybe_counter {
Some(counter) => {
let bookmark_update_log = repo.bookmark_update_log();
debug!(ctx.logger(), "repo {}, counter {}", repo_id, counter);
bookmark_update_log
.count_further_bookmark_log_entries(
ctx.clone(),
counter as u64,
None, // exclude_reason
)
.await
}
None => {
warn!(
ctx.logger(),
"backsyncer counter not found for repo {}!", repo_id,
);
Ok(0)
}
}
}
})
.try_buffer_unordered(100)
.try_fold(0, |acc, x| future::ready(Ok(::std::cmp::max(acc, x))))
.await?;
if max_further_entries > limit {
reporting::log_backpressure(ctx, max_further_entries, scuba_sample.clone());
tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
} else {
break;
}
}
if backpressure_params.wait_for_target_repo_hg_sync {
wait_for_latest_log_id_to_be_synced(ctx, target_repo, mutable_counters, sleep_secs).await?;
}
Ok(())
}
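// Build the mutable-counter name used to track forward-sync progress from this source repo.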
fn format_counter<M: SyncedCommitMapping + Clone + 'static>(
commit_syncer: &CommitSyncer<M>,
) -> String {
let source_repo_id = commit_syncer.get_source_repo_id();
format!("xreposync_from_{}", source_repo_id)
}
async fn run<'a>(
fb: FacebookInit,
ctx: CoreContext,
matches: &'a MononokeMatches<'a>,
) -> Result<(), Error> {
let config_store = matches.config_store();
let mut scuba_sample = get_scuba_sample(ctx.clone(), &matches);
let counters = args::open_source_sql::<SqlMutableCounters>(fb, config_store, &matches)?;
let source_repo_id = args::get_source_repo_id(config_store, &matches)?;
let target_repo_id = args::get_target_repo_id(config_store, &matches)?;
let logger = ctx.logger();
let source_repo = args::open_repo_with_repo_id(fb, &logger, source_repo_id, &matches);
let target_repo = args::open_repo_with_repo_id(fb, &logger, target_repo_id, &matches);
let (source_repo, target_repo): (InnerRepo, InnerRepo) =
try_join(source_repo, target_repo).await?;
let commit_syncer = create_commit_syncer_from_matches(&ctx, &matches).await?;
let live_commit_sync_config = Arc::new(CfgrLiveCommitSyncConfig::new(&logger, &config_store)?);
let common_commit_sync_config =
live_commit_sync_config.get_common_config(source_repo.blob_repo.get_repoid())?;
let common_bookmarks: HashSet<_> = common_commit_sync_config
.common_pushrebase_bookmarks
.clone()
.into_iter()
.collect();
let source_skiplist_index = Source(source_repo.skiplist_index.clone());
let target_skiplist_index = Target(target_repo.skiplist_index.clone());
match matches.subcommand() {
(ARG_ONCE, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let maybe_target_bookmark = sub_m
.value_of(ARG_TARGET_BOOKMARK)
.map(BookmarkName::new)
.transpose()?;
let bcs = get_starting_commit(&ctx, &sub_m, source_repo.blob_repo.clone()).await?;
run_in_single_commit_mode(
&ctx,
bcs,
commit_syncer,
scuba_sample,
source_skiplist_index,
target_skiplist_index,
maybe_target_bookmark,
common_bookmarks,
)
.await
}
(ARG_TAIL, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let sleep_secs = get_sleep_secs(sub_m)?;
let tailing_args = if sub_m.is_present(ARG_CATCH_UP_ONCE) {
TailingArgs::CatchUpOnce(commit_syncer)
} else {
let config_store = matches.config_store();
TailingArgs::LoopForever(commit_syncer, config_store.clone())
};
let backpressure_params = BackpressureParams::new(&ctx, matches, sub_m).await?;
let derived_data_types: Vec<String> = match sub_m.values_of(ARG_DERIVED_DATA_TYPES) {
Some(derived_data_types) => derived_data_types
.into_iter()
.map(String::from)
.collect::<Vec<_>>(),
None => vec![],
};
let maybe_bookmark_regex = match sub_m.value_of(ARG_BOOKMARK_REGEX) {
Some(regex) => Some(Regex::new(regex)?),
None => None,
};
run_in_tailing_mode(
&ctx,
counters,
source_skiplist_index,
target_skiplist_index,
common_bookmarks,
scuba_sample,
backpressure_params,
derived_data_types,
tailing_args,
sleep_secs,
maybe_bookmark_regex,
)
.await
}
(incorrect, _) => Err(format_err!(
"Incorrect mode
|
tail
|
identifier_name
|
main.rs
|
use futures::{
compat::Future01CompatExt,
future::{self, try_join},
stream::{self, TryStreamExt},
StreamExt,
};
use futures_stats::TimedFutureExt;
use live_commit_sync_config::{CfgrLiveCommitSyncConfig, LiveCommitSyncConfig};
use mononoke_api_types::InnerRepo;
use mononoke_hg_sync_job_helper_lib::wait_for_latest_log_id_to_be_synced;
use mononoke_types::{ChangesetId, RepositoryId};
use mutable_counters::{MutableCounters, SqlMutableCounters};
use regex::Regex;
use scuba_ext::MononokeScubaSampleBuilder;
use skiplist::SkiplistIndex;
use slog::{debug, error, info, warn};
use std::{collections::HashSet, sync::Arc, time::Duration};
use synced_commit_mapping::SyncedCommitMapping;
mod cli;
mod reporting;
mod setup;
mod sync;
use crate::cli::{
create_app, ARG_BACKSYNC_BACKPRESSURE_REPOS_IDS, ARG_BOOKMARK_REGEX, ARG_CATCH_UP_ONCE,
ARG_DERIVED_DATA_TYPES, ARG_HG_SYNC_BACKPRESSURE, ARG_ONCE, ARG_TAIL, ARG_TARGET_BOOKMARK,
};
use crate::reporting::{add_common_fields, log_bookmark_update_result, log_noop_iteration};
use crate::setup::{get_scuba_sample, get_sleep_secs, get_starting_commit};
use crate::sync::{sync_commit_and_ancestors, sync_single_bookmark_update_log};
fn print_error(ctx: CoreContext, error: &Error) {
error!(ctx.logger(), "{}", error);
for cause in error.chain().skip(1) {
error!(ctx.logger(), "caused by: {}", cause);
}
}
async fn run_in_single_commit_mode<M: SyncedCommitMapping + Clone + 'static>(
ctx: &CoreContext,
bcs: ChangesetId,
commit_syncer: CommitSyncer<M>,
scuba_sample: MononokeScubaSampleBuilder,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
maybe_bookmark: Option<BookmarkName>,
common_bookmarks: HashSet<BookmarkName>,
) -> Result<(), Error> {
info!(
ctx.logger(),
"Checking if {} is already synced {}->{}",
bcs,
commit_syncer.repos.get_source_repo().get_repoid(),
commit_syncer.repos.get_target_repo().get_repoid()
);
if commit_syncer
.commit_sync_outcome_exists(ctx, Source(bcs))
.await?
{
info!(ctx.logger(), "{} is already synced", bcs);
return Ok(());
}
let res = sync_commit_and_ancestors(
ctx,
&commit_syncer,
None, // from_cs_id,
bcs,
maybe_bookmark,
&source_skiplist_index,
&target_skiplist_index,
&common_bookmarks,
scuba_sample,
)
.await;
if res.is_ok() {
info!(ctx.logger(), "successful sync");
}
res.map(|_| ())
}
enum TailingArgs<M> {
CatchUpOnce(CommitSyncer<M>),
LoopForever(CommitSyncer<M>, ConfigStore),
}
async fn run_in_tailing_mode<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
mutable_counters: C,
source_skiplist_index: Source<Arc<SkiplistIndex>>,
target_skiplist_index: Target<Arc<SkiplistIndex>>,
common_pushrebase_bookmarks: HashSet<BookmarkName>,
base_scuba_sample: MononokeScubaSampleBuilder,
backpressure_params: BackpressureParams,
derived_data_types: Vec<String>,
tailing_args: TailingArgs<M>,
sleep_secs: u64,
maybe_bookmark_regex: Option<Regex>,
) -> Result<(), Error> {
match tailing_args {
TailingArgs::CatchUpOnce(commit_syncer) => {
let scuba_sample = MononokeScubaSampleBuilder::with_discard();
tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample,
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
}
TailingArgs::LoopForever(commit_syncer, config_store) => {
let live_commit_sync_config =
Arc::new(CfgrLiveCommitSyncConfig::new(ctx.logger(), &config_store)?);
let source_repo_id = commit_syncer.get_source_repo().get_repoid();
loop {
let scuba_sample = base_scuba_sample.clone();
// We only care about public pushes because draft pushes are not in the bookmark
// update log at all.
let enabled =
live_commit_sync_config.push_redirector_enabled_for_public(source_repo_id);
                // Push redirection is enabled - we need to disable forward sync in that case
if enabled {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
continue;
}
let synced_something = tail(
&ctx,
&commit_syncer,
&mutable_counters,
scuba_sample.clone(),
&common_pushrebase_bookmarks,
&source_skiplist_index,
&target_skiplist_index,
&backpressure_params,
&derived_data_types,
sleep_secs,
&maybe_bookmark_regex,
)
.await?;
                if !synced_something {
log_noop_iteration(scuba_sample);
tokio::time::sleep(Duration::new(sleep_secs, 0)).await;
}
}
}
}
Ok(())
}
async fn tail<
    M: SyncedCommitMapping + Clone + 'static,
    C: MutableCounters + Clone + Sync + 'static,
>(
ctx: &CoreContext,
commit_syncer: &CommitSyncer<M>,
mutable_counters: &C,
mut scuba_sample: MononokeScubaSampleBuilder,
common_pushrebase_bookmarks: &HashSet<BookmarkName>,
source_skiplist_index: &Source<Arc<SkiplistIndex>>,
target_skiplist_index: &Target<Arc<SkiplistIndex>>,
backpressure_params: &BackpressureParams,
derived_data_types: &[String],
sleep_secs: u64,
maybe_bookmark_regex: &Option<Regex>,
) -> Result<bool, Error> {
let source_repo = commit_syncer.get_source_repo();
let target_repo_id = commit_syncer.get_target_repo_id();
let bookmark_update_log = source_repo.bookmark_update_log();
let counter = format_counter(&commit_syncer);
let maybe_start_id = mutable_counters
.get_counter(ctx.clone(), target_repo_id, &counter)
.compat()
.await?;
let start_id = maybe_start_id.ok_or(format_err!("counter not found"))?;
let limit = 10;
let log_entries = bookmark_update_log
.read_next_bookmark_log_entries(ctx.clone(), start_id as u64, limit, Freshness::MaybeStale)
.try_collect::<Vec<_>>()
.await?;
let remaining_entries = commit_syncer
.get_source_repo()
.count_further_bookmark_log_entries(ctx.clone(), start_id as u64, None)
.await?;
if log_entries.is_empty() {
log_noop_iteration(scuba_sample.clone());
Ok(false)
} else {
scuba_sample.add("queue_size", remaining_entries);
info!(ctx.logger(), "queue size is {}", remaining_entries);
for entry in log_entries {
let entry_id = entry.id;
scuba_sample.add("entry_id", entry.id);
let mut skip = false;
if let Some(regex) = maybe_bookmark_regex {
                if !regex.is_match(entry.bookmark_name.as_str()) {
skip = true;
}
}
            if !skip {
let (stats, res) = sync_single_bookmark_update_log(
&ctx,
&commit_syncer,
entry,
source_skiplist_index,
target_skiplist_index,
&common_pushrebase_bookmarks,
scuba_sample.clone(),
)
.timed()
.await;
log_bookmark_update_result(&ctx, entry_id, scuba_sample.clone(), &res, stats);
let maybe_synced_css = res?;
if let SyncResult::Synced(synced_css) = maybe_synced_css {
derive_data_for_csids(
&ctx,
&commit_syncer.get_target_repo(),
synced_css,
derived_data_types,
)?
.await?;
maybe_apply_backpressure(
ctx,
mutable_counters,
backpressure_params,
commit_syncer.get_target_repo(),
scuba_sample.clone(),
sleep_secs,
)
.await?;
}
} else {
info!(
ctx.logger(),
"skipping log entry #{} for {}", entry.id, entry.bookmark_name
);
let mut scuba_sample = scuba_sample.clone();
scuba_sample.add("source_bookmark_name", format!("{}", entry.bookmark_name));
scuba_sample.add("skipped", true);
scuba_sample.log();
}
// Note that updating the counter might fail after successful sync of the commits.
// This is expected - next run will try to update the counter again without
// re-syncing the commits.
mutable_counters
.set_counter(ctx.clone(), target_repo_id, &counter, entry_id, None)
.compat()
.await?;
}
Ok(true)
}
}
async fn maybe_apply_backpressure<C>(
ctx: &CoreContext,
mutable_counters: &C,
backpressure_params: &BackpressureParams,
target_repo: &BlobRepo,
scuba_sample: MononokeScubaSampleBuilder,
sleep_secs: u64,
) -> Result<(), Error>
where
    C: MutableCounters + Clone + Sync + 'static,
{
let target_repo_id = target_repo.get_repoid();
let limit = 10;
loop {
let max_further_entries = stream::iter(&backpressure_params.backsync_repos)
.map(Ok)
.map_ok(|repo| {
async move {
let repo_id = repo.get_repoid();
let backsyncer_counter = format_backsyncer_counter(&target_repo_id);
let maybe_counter = mutable_counters
.get_counter(ctx.clone(), repo_id, &backsyncer_counter)
.compat()
.await?;
match maybe_counter {
Some(counter) => {
let bookmark_update_log = repo.bookmark_update_log();
debug!(ctx.logger(), "repo {}, counter {}", repo_id, counter);
bookmark_update_log
.count_further_bookmark_log_entries(
ctx.clone(),
counter as u64,
None, // exclude_reason
)
.await
}
None => {
warn!(
ctx.logger(),
"backsyncer counter not found for repo {}!", repo_id,
);
Ok(0)
}
}
}
})
.try_buffer_unordered(100)
.try_fold(0, |acc, x| future::ready(Ok(::std::cmp::max(acc, x))))
.await?;
if max_further_entries > limit {
reporting::log_backpressure(ctx, max_further_entries, scuba_sample.clone());
tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
} else {
break;
}
}
if backpressure_params.wait_for_target_repo_hg_sync {
wait_for_latest_log_id_to_be_synced(ctx, target_repo, mutable_counters, sleep_secs).await?;
}
Ok(())
}
fn format_counter<M: SyncedCommitMapping + Clone + 'static>(
commit_syncer: &CommitSyncer<M>,
) -> String {
let source_repo_id = commit_syncer.get_source_repo_id();
format!("xreposync_from_{}", source_repo_id)
}
async fn run<'a>(
fb: FacebookInit,
ctx: CoreContext,
matches: &'a MononokeMatches<'a>,
) -> Result<(), Error> {
let config_store = matches.config_store();
let mut scuba_sample = get_scuba_sample(ctx.clone(), &matches);
let counters = args::open_source_sql::<SqlMutableCounters>(fb, config_store, &matches)?;
let source_repo_id = args::get_source_repo_id(config_store, &matches)?;
let target_repo_id = args::get_target_repo_id(config_store, &matches)?;
let logger = ctx.logger();
let source_repo = args::open_repo_with_repo_id(fb, &logger, source_repo_id, &matches);
let target_repo = args::open_repo_with_repo_id(fb, &logger, target_repo_id, &matches);
let (source_repo, target_repo): (InnerRepo, InnerRepo) =
try_join(source_repo, target_repo).await?;
let commit_syncer = create_commit_syncer_from_matches(&ctx, &matches).await?;
let live_commit_sync_config = Arc::new(CfgrLiveCommitSyncConfig::new(&logger, &config_store)?);
let common_commit_sync_config =
live_commit_sync_config.get_common_config(source_repo.blob_repo.get_repoid())?;
let common_bookmarks: HashSet<_> = common_commit_sync_config
.common_pushrebase_bookmarks
.clone()
.into_iter()
.collect();
let source_skiplist_index = Source(source_repo.skiplist_index.clone());
let target_skiplist_index = Target(target_repo.skiplist_index.clone());
match matches.subcommand() {
(ARG_ONCE, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let maybe_target_bookmark = sub_m
.value_of(ARG_TARGET_BOOKMARK)
.map(BookmarkName::new)
.transpose()?;
let bcs = get_starting_commit(&ctx, &sub_m, source_repo.blob_repo.clone()).await?;
run_in_single_commit_mode(
&ctx,
bcs,
commit_syncer,
scuba_sample,
source_skiplist_index,
target_skiplist_index,
maybe_target_bookmark,
common_bookmarks,
)
.await
}
(ARG_TAIL, Some(sub_m)) => {
add_common_fields(&mut scuba_sample, &commit_syncer);
let sleep_secs = get_sleep_secs(sub_m)?;
let tailing_args = if sub_m.is_present(ARG_CATCH_UP_ONCE) {
TailingArgs::CatchUpOnce(commit_syncer)
} else {
let config_store = matches.config_store();
TailingArgs::LoopForever(commit_syncer, config_store.clone())
};
let backpressure_params = BackpressureParams::new(&ctx, matches, sub_m).await?;
let derived_data_types: Vec<String> = match sub_m.values_of(ARG_DERIVED_DATA_TYPES) {
Some(derived_data_types) => derived_data_types
.into_iter()
.map(String::from)
.collect::<Vec<_>>(),
None => vec![],
};
let maybe_bookmark_regex = match sub_m.value_of(ARG_BOOKMARK_REGEX) {
Some(regex) => Some(Regex::new(regex)?),
None => None,
};
run_in_tailing_mode(
&ctx,
counters,
source_skiplist_index,
target_skiplist_index,
common_bookmarks,
scuba_sample,
backpressure_params,
derived_data_types,
tailing_args,
sleep_secs,
maybe_bookmark_regex,
)
.await
}
(incorrect, _) => Err(format_err!(
"Incorrect mode of operation specified: {}",
incorrect
)),
}
}
fn context_and_matches<'a>(
fb: FacebookInit,
app: MononokeClapApp<'a, '_>,
) -> Result<(CoreContext, MononokeMatches<'a>), Error>
|
{
let matches = app.get_matches(fb)?;
let logger = matches.logger();
let ctx = CoreContext::new_with_logger(fb, logger.clone());
Ok((ctx, matches))
}
|
identifier_body
|
|
2_6_attraction.rs
|
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
//
// Example 2-6: Attraction
use nannou::prelude::*;
fn main() {
nannou::app(model).update(update).run();
}
struct Model {
mover: Mover,
attractor: Attractor,
}
struct Mover {
position: Point2,
velocity: Vector2,
acceleration: Vector2,
mass: f32,
}
// A type for a draggable attractive body in our world
struct Attractor {
    mass: f32, // Mass, tied to size
position: Point2, // position
dragging: bool, // Is the object being dragged?
roll_over: bool, // Is the mouse over the ellipse?
drag_offset: Vector2, // holds the offset for when the object is clicked on
}
impl Attractor {
const G: f32 = 1.0; // Gravitational Constant
fn new(rect: Rect) -> Self {
let position = rect.xy();
let mass = 20.0;
let drag_offset = vec2(0.0, 0.0);
let dragging = false;
let roll_over = false;
Attractor {
position,
mass,
drag_offset,
dragging,
roll_over,
}
}
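    // Newton's law of universal gravitation: F = (G * m1 * m2) / d^2,
    // applied along the normalized direction from the mover to this attractor.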
fn attract(&self, m: &Mover) -> Vector2 {
let mut force = self.position - m.position; // Calculate direction of force
let mut d = force.magnitude(); // Distance between objects
        d = d.max(5.0).min(25.0); // Limit the distance to eliminate "extreme" results for very close or very far objects
force = force.normalize(); // Normalize vector (distance doesn't matter, we just want this vector for direction)
let strength = (Attractor::G * self.mass * m.mass) / (d * d); // Calculate gravitational force magnitude
force * strength // Get force vector --> magnitude * direction
}
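    // Worked example for `attract` above (illustrative numbers, not taken from
    // the sketch): with G = 1.0, attractor mass 20.0, mover mass 1.0, and the
    // distance clamped to d = 10.0, the magnitude is
    // (1.0 * 20.0 * 1.0) / (10.0 * 10.0) = 0.2, applied along the normalized
    // direction vector.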
// Method to display
fn display(&self, draw: &Draw) {
let gray = if self.dragging {
0.2
} else if self.roll_over {
0.4
} else {
0.75
};
draw.ellipse()
.xy(self.position)
.w_h(self.mass * 2.0, self.mass * 2.0)
.rgba(gray, gray, gray, 0.8)
.stroke(BLACK)
.stroke_weight(4.0);
}
// The methods below are for mouse interaction
fn clicked(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
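        // The attractor is drawn with w_h = mass * 2.0, so its radius equals
        // its mass; `d < self.mass` is therefore a point-in-circle hit test.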
if d < self.mass {
self.dragging = true;
self.drag_offset.x = self.position.x - mx;
self.drag_offset.y = self.position.y - my;
}
}
fn hover(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
if d < self.mass {
self.roll_over = true;
} else
|
}
fn stop_dragging(&mut self) {
self.dragging = false;
}
fn drag(&mut self, mx: f32, my: f32) {
if self.dragging {
self.position.x = mx + self.drag_offset.x;
self.position.y = my + self.drag_offset.y;
}
}
}
impl Mover {
fn new() -> Self {
let position = pt2(80.0, 130.0);
let velocity = vec2(1.0, 0.0);
let acceleration = vec2(0.0, 0.0);
let mass = 1.0;
Mover {
position,
velocity,
acceleration,
mass,
}
}
fn apply_force(&mut self, force: Vector2) {
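        // Newton's second law: a = F / m, accumulated until the next update.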
let f = force / self.mass;
self.acceleration += f;
}
fn update(&mut self) {
self.velocity += self.acceleration;
self.position += self.velocity;
self.acceleration *= 0.0;
}
fn display(&self, draw: &Draw) {
draw.ellipse()
.xy(self.position)
.w_h(16.0, 16.0)
.gray(0.3)
.stroke(BLACK)
.stroke_weight(2.0);
}
fn _check_edges(&mut self, rect: Rect) {
if self.position.x > rect.right() {
self.position.x = rect.left();
} else if self.position.x < rect.left() {
self.position.x = rect.right();
}
if self.position.y < rect.bottom() {
self.velocity.y *= -1.0;
self.position.y = rect.bottom();
}
}
}
fn model(app: &App) -> Model {
let rect = Rect::from_w_h(640.0, 360.0);
app.new_window()
.size(rect.w() as u32, rect.h() as u32)
.event(event)
.view(view)
.build()
.unwrap();
let mover = Mover::new();
let attractor = Attractor::new(rect);
Model { mover, attractor }
}
fn event(app: &App, m: &mut Model, event: WindowEvent) {
match event {
MousePressed(_button) => {
m.attractor.clicked(app.mouse.x, app.mouse.y);
}
        MouseReleased(_button) => {
m.attractor.stop_dragging();
}
_other => (),
}
}
fn update(app: &App, m: &mut Model, _update: Update) {
let force = m.attractor.attract(&m.mover);
m.mover.apply_force(force);
m.mover.update();
m.attractor.drag(app.mouse.x, app.mouse.y);
m.attractor.hover(app.mouse.x, app.mouse.y);
}
fn view(app: &App, m: &Model, frame: Frame) {
// Begin drawing
let draw = app.draw();
draw.background().color(WHITE);
m.attractor.display(&draw);
m.mover.display(&draw);
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}
|
{
self.roll_over = false;
}
|
conditional_block
|
2_6_attraction.rs
|
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
//
// Example 2-6: Attraction
use nannou::prelude::*;
fn main() {
nannou::app(model).update(update).run();
}
struct Model {
mover: Mover,
attractor: Attractor,
}
struct Mover {
position: Point2,
velocity: Vector2,
acceleration: Vector2,
mass: f32,
}
// A type for a draggable attractive body in our world
struct Attractor {
    mass: f32, // Mass, tied to size
position: Point2, // position
dragging: bool, // Is the object being dragged?
roll_over: bool, // Is the mouse over the ellipse?
drag_offset: Vector2, // holds the offset for when the object is clicked on
}
impl Attractor {
const G: f32 = 1.0; // Gravitational Constant
fn new(rect: Rect) -> Self {
let position = rect.xy();
let mass = 20.0;
let drag_offset = vec2(0.0, 0.0);
let dragging = false;
let roll_over = false;
Attractor {
position,
mass,
drag_offset,
dragging,
roll_over,
}
}
fn attract(&self, m: &Mover) -> Vector2 {
let mut force = self.position - m.position; // Calculate direction of force
let mut d = force.magnitude(); // Distance between objects
        d = d.max(5.0).min(25.0); // Limit the distance to eliminate "extreme" results for very close or very far objects
force = force.normalize(); // Normalize vector (distance doesn't matter, we just want this vector for direction)
let strength = (Attractor::G * self.mass * m.mass) / (d * d); // Calculate gravitational force magnitude
force * strength // Get force vector --> magnitude * direction
}
// Method to display
fn display(&self, draw: &Draw) {
let gray = if self.dragging {
0.2
} else if self.roll_over {
0.4
} else {
0.75
};
draw.ellipse()
.xy(self.position)
.w_h(self.mass * 2.0, self.mass * 2.0)
.rgba(gray, gray, gray, 0.8)
.stroke(BLACK)
.stroke_weight(4.0);
}
// The methods below are for mouse interaction
fn clicked(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
if d < self.mass {
self.dragging = true;
self.drag_offset.x = self.position.x - mx;
self.drag_offset.y = self.position.y - my;
}
}
fn hover(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
if d < self.mass {
self.roll_over = true;
} else {
self.roll_over = false;
}
}
fn stop_dragging(&mut self) {
self.dragging = false;
}
fn drag(&mut self, mx: f32, my: f32) {
if self.dragging {
self.position.x = mx + self.drag_offset.x;
self.position.y = my + self.drag_offset.y;
}
}
}
impl Mover {
fn new() -> Self {
let position = pt2(80.0, 130.0);
let velocity = vec2(1.0, 0.0);
let acceleration = vec2(0.0, 0.0);
let mass = 1.0;
Mover {
position,
velocity,
acceleration,
mass,
}
}
fn apply_force(&mut self, force: Vector2) {
let f = force / self.mass;
self.acceleration += f;
}
fn update(&mut self) {
self.velocity += self.acceleration;
self.position += self.velocity;
self.acceleration *= 0.0;
}
fn display(&self, draw: &Draw) {
draw.ellipse()
.xy(self.position)
.w_h(16.0, 16.0)
.gray(0.3)
.stroke(BLACK)
.stroke_weight(2.0);
}
fn _check_edges(&mut self, rect: Rect) {
if self.position.x > rect.right() {
self.position.x = rect.left();
} else if self.position.x < rect.left() {
self.position.x = rect.right();
}
if self.position.y < rect.bottom() {
self.velocity.y *= -1.0;
self.position.y = rect.bottom();
}
}
}
fn model(app: &App) -> Model {
let rect = Rect::from_w_h(640.0, 360.0);
app.new_window()
.size(rect.w() as u32, rect.h() as u32)
.event(event)
.view(view)
.build()
.unwrap();
let mover = Mover::new();
let attractor = Attractor::new(rect);
Model { mover, attractor }
}
fn event(app: &App, m: &mut Model, event: WindowEvent) {
match event {
MousePressed(_button) => {
m.attractor.clicked(app.mouse.x, app.mouse.y);
}
        MouseReleased(_button) => {
m.attractor.stop_dragging();
}
_other => (),
}
}
fn update(app: &App, m: &mut Model, _update: Update) {
let force = m.attractor.attract(&m.mover);
m.mover.apply_force(force);
m.mover.update();
m.attractor.drag(app.mouse.x, app.mouse.y);
m.attractor.hover(app.mouse.x, app.mouse.y);
}
fn view(app: &App, m: &Model, frame: Frame) {
// Begin drawing
|
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}
|
let draw = app.draw();
draw.background().color(WHITE);
m.attractor.display(&draw);
m.mover.display(&draw);
|
random_line_split
|
2_6_attraction.rs
|
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
//
// Example 2-6: Attraction
use nannou::prelude::*;
fn main() {
nannou::app(model).update(update).run();
}
struct Model {
mover: Mover,
attractor: Attractor,
}
struct Mover {
position: Point2,
velocity: Vector2,
acceleration: Vector2,
mass: f32,
}
// A type for a draggable attractive body in our world
struct Attractor {
    mass: f32, // Mass, tied to size
position: Point2, // position
dragging: bool, // Is the object being dragged?
roll_over: bool, // Is the mouse over the ellipse?
drag_offset: Vector2, // holds the offset for when the object is clicked on
}
impl Attractor {
const G: f32 = 1.0; // Gravitational Constant
fn new(rect: Rect) -> Self {
let position = rect.xy();
let mass = 20.0;
let drag_offset = vec2(0.0, 0.0);
let dragging = false;
let roll_over = false;
Attractor {
position,
mass,
drag_offset,
dragging,
roll_over,
}
}
fn attract(&self, m: &Mover) -> Vector2 {
let mut force = self.position - m.position; // Calculate direction of force
let mut d = force.magnitude(); // Distance between objects
        d = d.max(5.0).min(25.0); // Limit the distance to eliminate "extreme" results for very close or very far objects
force = force.normalize(); // Normalize vector (distance doesn't matter, we just want this vector for direction)
let strength = (Attractor::G * self.mass * m.mass) / (d * d); // Calculate gravitational force magnitude
force * strength // Get force vector --> magnitude * direction
}
// Method to display
fn display(&self, draw: &Draw) {
let gray = if self.dragging {
0.2
} else if self.roll_over {
0.4
} else {
0.75
};
draw.ellipse()
.xy(self.position)
.w_h(self.mass * 2.0, self.mass * 2.0)
.rgba(gray, gray, gray, 0.8)
.stroke(BLACK)
.stroke_weight(4.0);
}
// The methods below are for mouse interaction
fn clicked(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
if d < self.mass {
self.dragging = true;
self.drag_offset.x = self.position.x - mx;
self.drag_offset.y = self.position.y - my;
}
}
fn hover(&mut self, mx: f32, my: f32) {
let d = self.position.distance(pt2(mx, my));
if d < self.mass {
self.roll_over = true;
} else {
self.roll_over = false;
}
}
fn stop_dragging(&mut self) {
self.dragging = false;
}
fn
|
(&mut self, mx: f32, my: f32) {
if self.dragging {
self.position.x = mx + self.drag_offset.x;
self.position.y = my + self.drag_offset.y;
}
}
}
impl Mover {
fn new() -> Self {
let position = pt2(80.0, 130.0);
let velocity = vec2(1.0, 0.0);
let acceleration = vec2(0.0, 0.0);
let mass = 1.0;
Mover {
position,
velocity,
acceleration,
mass,
}
}
fn apply_force(&mut self, force: Vector2) {
let f = force / self.mass;
self.acceleration += f;
}
fn update(&mut self) {
self.velocity += self.acceleration;
self.position += self.velocity;
self.acceleration *= 0.0;
}
fn display(&self, draw: &Draw) {
draw.ellipse()
.xy(self.position)
.w_h(16.0, 16.0)
.gray(0.3)
.stroke(BLACK)
.stroke_weight(2.0);
}
fn _check_edges(&mut self, rect: Rect) {
if self.position.x > rect.right() {
self.position.x = rect.left();
} else if self.position.x < rect.left() {
self.position.x = rect.right();
}
if self.position.y < rect.bottom() {
self.velocity.y *= -1.0;
self.position.y = rect.bottom();
}
}
}
fn model(app: &App) -> Model {
let rect = Rect::from_w_h(640.0, 360.0);
app.new_window()
.size(rect.w() as u32, rect.h() as u32)
.event(event)
.view(view)
.build()
.unwrap();
let mover = Mover::new();
let attractor = Attractor::new(rect);
Model { mover, attractor }
}
fn event(app: &App, m: &mut Model, event: WindowEvent) {
match event {
MousePressed(_button) => {
m.attractor.clicked(app.mouse.x, app.mouse.y);
}
        MouseReleased(_button) => {
m.attractor.stop_dragging();
}
_other => (),
}
}
fn update(app: &App, m: &mut Model, _update: Update) {
let force = m.attractor.attract(&m.mover);
m.mover.apply_force(force);
m.mover.update();
m.attractor.drag(app.mouse.x, app.mouse.y);
m.attractor.hover(app.mouse.x, app.mouse.y);
}
fn view(app: &App, m: &Model, frame: Frame) {
// Begin drawing
let draw = app.draw();
draw.background().color(WHITE);
m.attractor.display(&draw);
m.mover.display(&draw);
// Write the result of our drawing to the window's frame.
draw.to_frame(app, &frame).unwrap();
}
|
drag
|
identifier_name
|
elided-lifetime.rs
|
// aux-build:elided-lifetime.rs
//
// rust-lang/rust#75225
//
// Since Rust 2018 we encourage writing out <'_> explicitly to make it clear
// that borrowing is occurring. Make sure rustdoc is following the same idiom.
|
pub struct Ref<'a>(&'a u32);
type ARef<'a> = Ref<'a>;
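// ARef is a plain type alias, so rustdoc should render its elided lifetime
// exactly as it renders Ref's; test3 and test4 below assert this.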
// @has foo/fn.test1.html
// @matches - "Ref</a><'_>"
pub fn test1(a: &u32) -> Ref {
Ref(a)
}
// @has foo/fn.test2.html
// @matches - "Ref</a><'_>"
pub fn test2(a: &u32) -> Ref<'_> {
Ref(a)
}
// @has foo/fn.test3.html
// @matches - "Ref</a><'_>"
pub fn test3(a: &u32) -> ARef {
Ref(a)
}
// @has foo/fn.test4.html
// @matches - "Ref</a><'_>"
pub fn test4(a: &u32) -> ARef<'_> {
Ref(a)
}
// Ensure external paths in inlined docs also display elided lifetime
// @has foo/bar/fn.test5.html
// @matches - "Ref</a><'_>"
// @has foo/bar/fn.test6.html
// @matches - "Ref</a><'_>"
#[doc(inline)]
pub extern crate bar;
|
#![crate_name = "foo"]
|
random_line_split
|
elided-lifetime.rs
|
// aux-build:elided-lifetime.rs
//
// rust-lang/rust#75225
//
// Since Rust 2018 we encourage writing out <'_> explicitly to make it clear
// that borrowing is occurring. Make sure rustdoc is following the same idiom.
#![crate_name = "foo"]
pub struct Ref<'a>(&'a u32);
type ARef<'a> = Ref<'a>;
// @has foo/fn.test1.html
// @matches - "Ref</a><'_>"
pub fn test1(a: &u32) -> Ref {
Ref(a)
}
// @has foo/fn.test2.html
// @matches - "Ref</a><'_>"
pub fn test2(a: &u32) -> Ref<'_> {
Ref(a)
}
// @has foo/fn.test3.html
// @matches - "Ref</a><'_>"
pub fn test3(a: &u32) -> ARef {
Ref(a)
}
// @has foo/fn.test4.html
// @matches - "Ref</a><'_>"
pub fn
|
(a: &u32) -> ARef<'_> {
Ref(a)
}
// Ensure external paths in inlined docs also display elided lifetime
// @has foo/bar/fn.test5.html
// @matches - "Ref</a><'_>"
// @has foo/bar/fn.test6.html
// @matches - "Ref</a><'_>"
#[doc(inline)]
pub extern crate bar;
|
test4
|
identifier_name
|
compositor_task.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Communication with the compositor task.
use CompositorMsg as ConstellationMsg;
use compositor;
use euclid::point::Point2D;
use euclid::size::Size2D;
use gfx_traits::PaintListener;
use headless;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use layers::layers::{BufferRequest, LayerBufferSet};
use layers::platform::surface::{NativeDisplay, NativeSurface};
use msg::compositor_msg::{Epoch, FrameTreeId, LayerId, LayerProperties};
use msg::constellation_msg::{AnimationState, PipelineId};
use msg::constellation_msg::{Image, Key, KeyModifiers, KeyState};
use profile_traits::mem;
use profile_traits::time;
use script_traits::{EventResult, ScriptToCompositorMsg};
use std::fmt::{Debug, Error, Formatter};
use std::rc::Rc;
use std::sync::mpsc::{Receiver, Sender, channel};
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
use windowing::{WindowEvent, WindowMethods};
pub use constellation::SendableFrameTree;
pub use windowing;
/// Sends messages to the compositor. This is a trait supplied by the port because the method used
/// to communicate with the compositor may have to kick OS event loops awake, communicate cross-
/// process, and so forth.
pub trait CompositorProxy :'static + Send {
/// Sends a message to the compositor.
fn send(&self, msg: Msg);
/// Clones the compositor proxy.
fn clone_compositor_proxy(&self) -> Box<CompositorProxy +'static + Send>;
}
/// The port that the compositor receives messages on. As above, this is a trait supplied by the
/// Servo port.
pub trait CompositorReceiver :'static {
/// Receives the next message inbound for the compositor. This must not block.
fn try_recv_compositor_msg(&mut self) -> Option<Msg>;
/// Synchronously waits for, and returns, the next message inbound for the compositor.
fn recv_compositor_msg(&mut self) -> Msg;
}
/// A convenience implementation of `CompositorReceiver` for a plain old Rust `Receiver`.
impl CompositorReceiver for Receiver<Msg> {
fn try_recv_compositor_msg(&mut self) -> Option<Msg> {
self.try_recv().ok()
}
fn recv_compositor_msg(&mut self) -> Msg {
self.recv().unwrap()
}
}
pub fn run_script_listener_thread(compositor_proxy: Box<CompositorProxy +'static + Send>,
receiver: IpcReceiver<ScriptToCompositorMsg>) {
while let Ok(msg) = receiver.recv() {
match msg {
ScriptToCompositorMsg::ScrollFragmentPoint(pipeline_id, layer_id, point, smooth) => {
compositor_proxy.send(Msg::ScrollFragmentPoint(pipeline_id,
layer_id,
point,
smooth));
}
ScriptToCompositorMsg::GetClientWindow(send) => {
compositor_proxy.send(Msg::GetClientWindow(send));
}
ScriptToCompositorMsg::MoveTo(point) => {
compositor_proxy.send(Msg::MoveTo(point));
}
ScriptToCompositorMsg::ResizeTo(size) => {
compositor_proxy.send(Msg::ResizeTo(size));
}
ScriptToCompositorMsg::Exit => {
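                // Synchronous shutdown handshake: send Exit with a reply
                // channel and block until the compositor acknowledges it.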
let (chan, port) = ipc::channel().unwrap();
compositor_proxy.send(Msg::Exit(chan));
port.recv().unwrap();
}
ScriptToCompositorMsg::SetTitle(pipeline_id, title) => {
compositor_proxy.send(Msg::ChangePageTitle(pipeline_id, title))
}
ScriptToCompositorMsg::SendKeyEvent(key, key_state, key_modifiers) => {
compositor_proxy.send(Msg::KeyEvent(key, key_state, key_modifiers))
}
ScriptToCompositorMsg::TouchEventProcessed(result) => {
compositor_proxy.send(Msg::TouchEventProcessed(result))
}
}
}
}
|
self.send(Msg::GetNativeDisplay(chan));
// If the compositor is shutting down when a paint task
// is being created, the compositor won't respond to
// this message, resulting in an eventual panic. Instead,
// just return None in this case, since the paint task
// will exit shortly and never actually be requested
// to paint buffers by the compositor.
port.recv().unwrap_or(None)
}
fn assign_painted_buffers(&mut self,
pipeline_id: PipelineId,
epoch: Epoch,
replies: Vec<(LayerId, Box<LayerBufferSet>)>,
frame_tree_id: FrameTreeId) {
self.send(Msg::AssignPaintedBuffers(pipeline_id, epoch, replies, frame_tree_id));
}
fn ignore_buffer_requests(&mut self, buffer_requests: Vec<BufferRequest>) {
let mut native_surfaces = Vec::new();
for request in buffer_requests.into_iter() {
if let Some(native_surface) = request.native_surface {
native_surfaces.push(native_surface);
}
}
        if !native_surfaces.is_empty() {
self.send(Msg::ReturnUnusedNativeSurfaces(native_surfaces));
}
}
fn initialize_layers_for_pipeline(&mut self,
pipeline_id: PipelineId,
properties: Vec<LayerProperties>,
epoch: Epoch) {
// FIXME(#2004, pcwalton): This assumes that the first layer determines the page size, and
// that all other layers are immediate children of it. This is sufficient to handle
// `position: fixed` but will not be sufficient to handle `overflow: scroll` or transforms.
self.send(Msg::InitializeLayersForPipeline(pipeline_id, epoch, properties));
}
fn notify_paint_task_exiting(&mut self, pipeline_id: PipelineId) {
self.send(Msg::PaintTaskExited(pipeline_id))
}
}
/// Messages from the painting task and the constellation task to the compositor task.
pub enum Msg {
/// Requests that the compositor shut down.
Exit(IpcSender<()>),
/// Informs the compositor that the constellation has completed shutdown.
/// Required because the constellation can have pending calls to make
/// (e.g. SetFrameTree) at the time that we send it an ExitMsg.
ShutdownComplete,
/// Requests the compositor's graphics metadata. Graphics metadata is what the painter needs
/// to create surfaces that the compositor can see. On Linux this is the X display; on Mac this
/// is the pixel format.
///
/// The headless compositor returns `None`.
GetNativeDisplay(Sender<Option<NativeDisplay>>),
/// Tells the compositor to create or update the layers for a pipeline if necessary
/// (i.e. if no layer with that ID exists).
InitializeLayersForPipeline(PipelineId, Epoch, Vec<LayerProperties>),
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Requests that the compositor assign the painted buffers to the given layers.
AssignPaintedBuffers(PipelineId, Epoch, Vec<(LayerId, Box<LayerBufferSet>)>, FrameTreeId),
/// Alerts the compositor that the current page has changed its title.
ChangePageTitle(PipelineId, Option<String>),
/// Alerts the compositor that the current page has changed its URL.
ChangePageUrl(PipelineId, Url),
/// Alerts the compositor that the given pipeline has changed whether it is running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Replaces the current frame tree, typically called during main frame navigation.
SetFrameTree(SendableFrameTree, IpcSender<()>, Sender<ConstellationMsg>),
/// The load of a page has begun: (can go back, can go forward).
LoadStart(bool, bool),
/// The load of a page has completed: (can go back, can go forward).
LoadComplete(bool, bool),
/// Indicates that the scrolling timeout with the given starting timestamp has happened and a
/// composite should happen. (See the `scrolling` module.)
ScrollTimeout(u64),
RecompositeAfterScroll,
/// Sends an unconsumed key event back to the compositor.
KeyEvent(Key, KeyState, KeyModifiers),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Changes the cursor.
SetCursor(Cursor),
/// Composite to a PNG file and return the Image over a passed channel.
CreatePng(IpcSender<Option<Image>>),
/// Informs the compositor that the paint task for the given pipeline has exited.
PaintTaskExited(PipelineId),
/// Alerts the compositor that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
/// A reply to the compositor asking if the output image is stable.
IsReadyToSaveImageReply(bool),
/// A favicon was detected
NewFavicon(Url),
/// <head> tag finished parsing
HeadParsed,
/// Signal that the paint task ignored the paint requests that carried
/// these native surfaces, so that they can be re-added to the surface cache.
ReturnUnusedNativeSurfaces(Vec<NativeSurface>),
/// Collect memory reports and send them back to the given mem::ReportsChan.
CollectMemoryReports(mem::ReportsChan),
/// A status message to be displayed by the browser chrome.
Status(Option<String>),
    /// Get the window's size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
    /// Resize the window to the given size
ResizeTo(Size2D<u32>),
/// A pipeline was shut down.
PipelineExited(PipelineId),
}
impl Debug for Msg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
Msg::Exit(..) => write!(f, "Exit"),
Msg::ShutdownComplete => write!(f, "ShutdownComplete"),
Msg::GetNativeDisplay(..) => write!(f, "GetNativeDisplay"),
Msg::InitializeLayersForPipeline(..) => write!(f, "InitializeLayersForPipeline"),
Msg::ScrollFragmentPoint(..) => write!(f, "ScrollFragmentPoint"),
Msg::AssignPaintedBuffers(..) => write!(f, "AssignPaintedBuffers"),
Msg::ChangeRunningAnimationsState(..) => write!(f, "ChangeRunningAnimationsState"),
Msg::ChangePageTitle(..) => write!(f, "ChangePageTitle"),
Msg::ChangePageUrl(..) => write!(f, "ChangePageUrl"),
Msg::SetFrameTree(..) => write!(f, "SetFrameTree"),
Msg::LoadComplete(..) => write!(f, "LoadComplete"),
Msg::LoadStart(..) => write!(f, "LoadStart"),
Msg::ScrollTimeout(..) => write!(f, "ScrollTimeout"),
Msg::RecompositeAfterScroll => write!(f, "RecompositeAfterScroll"),
Msg::KeyEvent(..) => write!(f, "KeyEvent"),
Msg::TouchEventProcessed(..) => write!(f, "TouchEventProcessed"),
Msg::SetCursor(..) => write!(f, "SetCursor"),
Msg::CreatePng(..) => write!(f, "CreatePng"),
Msg::PaintTaskExited(..) => write!(f, "PaintTaskExited"),
Msg::ViewportConstrained(..) => write!(f, "ViewportConstrained"),
Msg::IsReadyToSaveImageReply(..) => write!(f, "IsReadyToSaveImageReply"),
Msg::NewFavicon(..) => write!(f, "NewFavicon"),
Msg::HeadParsed => write!(f, "HeadParsed"),
Msg::ReturnUnusedNativeSurfaces(..) => write!(f, "ReturnUnusedNativeSurfaces"),
Msg::CollectMemoryReports(..) => write!(f, "CollectMemoryReports"),
Msg::Status(..) => write!(f, "Status"),
Msg::GetClientWindow(..) => write!(f, "GetClientWindow"),
Msg::MoveTo(..) => write!(f, "MoveTo"),
Msg::ResizeTo(..) => write!(f, "ResizeTo"),
Msg::PipelineExited(..) => write!(f, "PipelineExited"),
}
}
}
pub struct CompositorTask;
impl CompositorTask {
pub fn create<Window>(window: Option<Rc<Window>>,
state: InitialCompositorState)
-> Box<CompositorEventListener +'static>
where Window: WindowMethods +'static {
match window {
Some(window) => {
box compositor::IOCompositor::create(window, state)
as Box<CompositorEventListener>
}
None => {
box headless::NullCompositor::create(state)
as Box<CompositorEventListener>
}
}
}
}
pub trait CompositorEventListener {
fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool;
fn repaint_synchronously(&mut self);
fn pinch_zoom_level(&self) -> f32;
/// Requests that the compositor send the title for the main frame as soon as possible.
fn title_for_main_frame(&self);
}
/// Data used to construct a compositor.
pub struct InitialCompositorState {
/// A channel to the compositor.
pub sender: Box<CompositorProxy + Send>,
/// A port on which messages inbound to the compositor can be received.
pub receiver: Box<CompositorReceiver>,
/// A channel to the constellation.
pub constellation_chan: Sender<ConstellationMsg>,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
}
|
/// Implementation of the abstract `PaintListener` interface.
impl PaintListener for Box<CompositorProxy + 'static + Send> {
fn native_display(&mut self) -> Option<NativeDisplay> {
let (chan, port) = channel();
|
random_line_split
|
compositor_task.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Communication with the compositor task.
use CompositorMsg as ConstellationMsg;
use compositor;
use euclid::point::Point2D;
use euclid::size::Size2D;
use gfx_traits::PaintListener;
use headless;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use layers::layers::{BufferRequest, LayerBufferSet};
use layers::platform::surface::{NativeDisplay, NativeSurface};
use msg::compositor_msg::{Epoch, FrameTreeId, LayerId, LayerProperties};
use msg::constellation_msg::{AnimationState, PipelineId};
use msg::constellation_msg::{Image, Key, KeyModifiers, KeyState};
use profile_traits::mem;
use profile_traits::time;
use script_traits::{EventResult, ScriptToCompositorMsg};
use std::fmt::{Debug, Error, Formatter};
use std::rc::Rc;
use std::sync::mpsc::{Receiver, Sender, channel};
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
use windowing::{WindowEvent, WindowMethods};
pub use constellation::SendableFrameTree;
pub use windowing;
/// Sends messages to the compositor. This is a trait supplied by the port because the method used
/// to communicate with the compositor may have to kick OS event loops awake, communicate cross-
/// process, and so forth.
pub trait CompositorProxy :'static + Send {
/// Sends a message to the compositor.
fn send(&self, msg: Msg);
/// Clones the compositor proxy.
fn clone_compositor_proxy(&self) -> Box<CompositorProxy +'static + Send>;
}
/// The port that the compositor receives messages on. As above, this is a trait supplied by the
/// Servo port.
pub trait CompositorReceiver :'static {
/// Receives the next message inbound for the compositor. This must not block.
fn try_recv_compositor_msg(&mut self) -> Option<Msg>;
/// Synchronously waits for, and returns, the next message inbound for the compositor.
fn recv_compositor_msg(&mut self) -> Msg;
}
/// A convenience implementation of `CompositorReceiver` for a plain old Rust `Receiver`.
impl CompositorReceiver for Receiver<Msg> {
fn try_recv_compositor_msg(&mut self) -> Option<Msg> {
self.try_recv().ok()
}
fn recv_compositor_msg(&mut self) -> Msg {
self.recv().unwrap()
}
}
pub fn run_script_listener_thread(compositor_proxy: Box<CompositorProxy +'static + Send>,
receiver: IpcReceiver<ScriptToCompositorMsg>) {
while let Ok(msg) = receiver.recv() {
match msg {
ScriptToCompositorMsg::ScrollFragmentPoint(pipeline_id, layer_id, point, smooth) => {
compositor_proxy.send(Msg::ScrollFragmentPoint(pipeline_id,
layer_id,
point,
smooth));
}
ScriptToCompositorMsg::GetClientWindow(send) => {
compositor_proxy.send(Msg::GetClientWindow(send));
}
ScriptToCompositorMsg::MoveTo(point) => {
compositor_proxy.send(Msg::MoveTo(point));
}
ScriptToCompositorMsg::ResizeTo(size) => {
compositor_proxy.send(Msg::ResizeTo(size));
}
ScriptToCompositorMsg::Exit => {
let (chan, port) = ipc::channel().unwrap();
compositor_proxy.send(Msg::Exit(chan));
port.recv().unwrap();
}
ScriptToCompositorMsg::SetTitle(pipeline_id, title) => {
compositor_proxy.send(Msg::ChangePageTitle(pipeline_id, title))
}
ScriptToCompositorMsg::SendKeyEvent(key, key_state, key_modifiers) => {
compositor_proxy.send(Msg::KeyEvent(key, key_state, key_modifiers))
}
ScriptToCompositorMsg::TouchEventProcessed(result) => {
compositor_proxy.send(Msg::TouchEventProcessed(result))
}
}
}
}
/// Implementation of the abstract `PaintListener` interface.
impl PaintListener for Box<CompositorProxy +'static + Send> {
fn native_display(&mut self) -> Option<NativeDisplay> {
let (chan, port) = channel();
self.send(Msg::GetNativeDisplay(chan));
// If the compositor is shutting down when a paint task
// is being created, the compositor won't respond to
// this message, resulting in an eventual panic. Instead,
// just return None in this case, since the paint task
// will exit shortly and never actually be requested
// to paint buffers by the compositor.
port.recv().unwrap_or(None)
}
fn assign_painted_buffers(&mut self,
pipeline_id: PipelineId,
epoch: Epoch,
replies: Vec<(LayerId, Box<LayerBufferSet>)>,
frame_tree_id: FrameTreeId) {
self.send(Msg::AssignPaintedBuffers(pipeline_id, epoch, replies, frame_tree_id));
}
fn ignore_buffer_requests(&mut self, buffer_requests: Vec<BufferRequest>) {
let mut native_surfaces = Vec::new();
for request in buffer_requests.into_iter() {
if let Some(native_surface) = request.native_surface {
native_surfaces.push(native_surface);
}
}
        if !native_surfaces.is_empty() {
self.send(Msg::ReturnUnusedNativeSurfaces(native_surfaces));
}
}
fn initialize_layers_for_pipeline(&mut self,
pipeline_id: PipelineId,
properties: Vec<LayerProperties>,
epoch: Epoch) {
// FIXME(#2004, pcwalton): This assumes that the first layer determines the page size, and
// that all other layers are immediate children of it. This is sufficient to handle
// `position: fixed` but will not be sufficient to handle `overflow: scroll` or transforms.
self.send(Msg::InitializeLayersForPipeline(pipeline_id, epoch, properties));
}
fn notify_paint_task_exiting(&mut self, pipeline_id: PipelineId) {
self.send(Msg::PaintTaskExited(pipeline_id))
}
}
/// Messages from the painting task and the constellation task to the compositor task.
pub enum Msg {
/// Requests that the compositor shut down.
Exit(IpcSender<()>),
/// Informs the compositor that the constellation has completed shutdown.
/// Required because the constellation can have pending calls to make
/// (e.g. SetFrameTree) at the time that we send it an ExitMsg.
ShutdownComplete,
/// Requests the compositor's graphics metadata. Graphics metadata is what the painter needs
/// to create surfaces that the compositor can see. On Linux this is the X display; on Mac this
/// is the pixel format.
///
/// The headless compositor returns `None`.
GetNativeDisplay(Sender<Option<NativeDisplay>>),
/// Tells the compositor to create or update the layers for a pipeline if necessary
/// (i.e. if no layer with that ID exists).
InitializeLayersForPipeline(PipelineId, Epoch, Vec<LayerProperties>),
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Requests that the compositor assign the painted buffers to the given layers.
AssignPaintedBuffers(PipelineId, Epoch, Vec<(LayerId, Box<LayerBufferSet>)>, FrameTreeId),
/// Alerts the compositor that the current page has changed its title.
ChangePageTitle(PipelineId, Option<String>),
/// Alerts the compositor that the current page has changed its URL.
ChangePageUrl(PipelineId, Url),
/// Alerts the compositor that the given pipeline has changed whether it is running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Replaces the current frame tree, typically called during main frame navigation.
SetFrameTree(SendableFrameTree, IpcSender<()>, Sender<ConstellationMsg>),
/// The load of a page has begun: (can go back, can go forward).
LoadStart(bool, bool),
/// The load of a page has completed: (can go back, can go forward).
LoadComplete(bool, bool),
/// Indicates that the scrolling timeout with the given starting timestamp has happened and a
/// composite should happen. (See the `scrolling` module.)
ScrollTimeout(u64),
RecompositeAfterScroll,
/// Sends an unconsumed key event back to the compositor.
KeyEvent(Key, KeyState, KeyModifiers),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Changes the cursor.
SetCursor(Cursor),
/// Composite to a PNG file and return the Image over a passed channel.
CreatePng(IpcSender<Option<Image>>),
/// Informs the compositor that the paint task for the given pipeline has exited.
PaintTaskExited(PipelineId),
/// Alerts the compositor that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
/// A reply to the compositor asking if the output image is stable.
IsReadyToSaveImageReply(bool),
/// A favicon was detected
NewFavicon(Url),
/// <head> tag finished parsing
HeadParsed,
/// Signal that the paint task ignored the paint requests that carried
/// these native surfaces, so that they can be re-added to the surface cache.
ReturnUnusedNativeSurfaces(Vec<NativeSurface>),
/// Collect memory reports and send them back to the given mem::ReportsChan.
CollectMemoryReports(mem::ReportsChan),
/// A status message to be displayed by the browser chrome.
Status(Option<String>),
    /// Get the window's size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
    /// Resize the window to the given size
ResizeTo(Size2D<u32>),
/// A pipeline was shut down.
PipelineExited(PipelineId),
}
impl Debug for Msg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
Msg::Exit(..) => write!(f, "Exit"),
Msg::ShutdownComplete => write!(f, "ShutdownComplete"),
Msg::GetNativeDisplay(..) => write!(f, "GetNativeDisplay"),
Msg::InitializeLayersForPipeline(..) => write!(f, "InitializeLayersForPipeline"),
Msg::ScrollFragmentPoint(..) => write!(f, "ScrollFragmentPoint"),
Msg::AssignPaintedBuffers(..) => write!(f, "AssignPaintedBuffers"),
Msg::ChangeRunningAnimationsState(..) => write!(f, "ChangeRunningAnimationsState"),
Msg::ChangePageTitle(..) => write!(f, "ChangePageTitle"),
Msg::ChangePageUrl(..) => write!(f, "ChangePageUrl"),
Msg::SetFrameTree(..) => write!(f, "SetFrameTree"),
Msg::LoadComplete(..) => write!(f, "LoadComplete"),
Msg::LoadStart(..) => write!(f, "LoadStart"),
Msg::ScrollTimeout(..) => write!(f, "ScrollTimeout"),
Msg::RecompositeAfterScroll => write!(f, "RecompositeAfterScroll"),
Msg::KeyEvent(..) => write!(f, "KeyEvent"),
Msg::TouchEventProcessed(..) => write!(f, "TouchEventProcessed"),
Msg::SetCursor(..) => write!(f, "SetCursor"),
Msg::CreatePng(..) => write!(f, "CreatePng"),
Msg::PaintTaskExited(..) => write!(f, "PaintTaskExited"),
Msg::ViewportConstrained(..) => write!(f, "ViewportConstrained"),
Msg::IsReadyToSaveImageReply(..) => write!(f, "IsReadyToSaveImageReply"),
Msg::NewFavicon(..) => write!(f, "NewFavicon"),
Msg::HeadParsed => write!(f, "HeadParsed"),
Msg::ReturnUnusedNativeSurfaces(..) => write!(f, "ReturnUnusedNativeSurfaces"),
Msg::CollectMemoryReports(..) => write!(f, "CollectMemoryReports"),
Msg::Status(..) => write!(f, "Status"),
Msg::GetClientWindow(..) => write!(f, "GetClientWindow"),
Msg::MoveTo(..) => write!(f, "MoveTo"),
Msg::ResizeTo(..) => write!(f, "ResizeTo"),
Msg::PipelineExited(..) => write!(f, "PipelineExited"),
}
}
}
pub struct CompositorTask;
impl CompositorTask {
pub fn create<Window>(window: Option<Rc<Window>>,
state: InitialCompositorState)
-> Box<CompositorEventListener +'static>
where Window: WindowMethods +'static {
match window {
Some(window) => {
box compositor::IOCompositor::create(window, state)
as Box<CompositorEventListener>
}
None => {
box headless::NullCompositor::create(state)
as Box<CompositorEventListener>
}
}
}
}
pub trait CompositorEventListener {
fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool;
fn repaint_synchronously(&mut self);
fn pinch_zoom_level(&self) -> f32;
/// Requests that the compositor send the title for the main frame as soon as possible.
fn title_for_main_frame(&self);
}
/// Data used to construct a compositor.
pub struct
|
{
/// A channel to the compositor.
pub sender: Box<CompositorProxy + Send>,
/// A port on which messages inbound to the compositor can be received.
pub receiver: Box<CompositorReceiver>,
/// A channel to the constellation.
pub constellation_chan: Sender<ConstellationMsg>,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
}
|
InitialCompositorState
|
identifier_name
|
compositor_task.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Communication with the compositor task.
use CompositorMsg as ConstellationMsg;
use compositor;
use euclid::point::Point2D;
use euclid::size::Size2D;
use gfx_traits::PaintListener;
use headless;
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use layers::layers::{BufferRequest, LayerBufferSet};
use layers::platform::surface::{NativeDisplay, NativeSurface};
use msg::compositor_msg::{Epoch, FrameTreeId, LayerId, LayerProperties};
use msg::constellation_msg::{AnimationState, PipelineId};
use msg::constellation_msg::{Image, Key, KeyModifiers, KeyState};
use profile_traits::mem;
use profile_traits::time;
use script_traits::{EventResult, ScriptToCompositorMsg};
use std::fmt::{Debug, Error, Formatter};
use std::rc::Rc;
use std::sync::mpsc::{Receiver, Sender, channel};
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
use windowing::{WindowEvent, WindowMethods};
pub use constellation::SendableFrameTree;
pub use windowing;
/// Sends messages to the compositor. This is a trait supplied by the port because the method used
/// to communicate with the compositor may have to kick OS event loops awake, communicate cross-
/// process, and so forth.
pub trait CompositorProxy :'static + Send {
/// Sends a message to the compositor.
fn send(&self, msg: Msg);
/// Clones the compositor proxy.
fn clone_compositor_proxy(&self) -> Box<CompositorProxy +'static + Send>;
}
/// The port that the compositor receives messages on. As above, this is a trait supplied by the
/// Servo port.
pub trait CompositorReceiver :'static {
/// Receives the next message inbound for the compositor. This must not block.
fn try_recv_compositor_msg(&mut self) -> Option<Msg>;
/// Synchronously waits for, and returns, the next message inbound for the compositor.
fn recv_compositor_msg(&mut self) -> Msg;
}
/// A convenience implementation of `CompositorReceiver` for a plain old Rust `Receiver`.
impl CompositorReceiver for Receiver<Msg> {
fn try_recv_compositor_msg(&mut self) -> Option<Msg> {
self.try_recv().ok()
}
fn recv_compositor_msg(&mut self) -> Msg {
self.recv().unwrap()
}
}
pub fn run_script_listener_thread(compositor_proxy: Box<CompositorProxy +'static + Send>,
receiver: IpcReceiver<ScriptToCompositorMsg>) {
while let Ok(msg) = receiver.recv() {
match msg {
ScriptToCompositorMsg::ScrollFragmentPoint(pipeline_id, layer_id, point, smooth) => {
compositor_proxy.send(Msg::ScrollFragmentPoint(pipeline_id,
layer_id,
point,
smooth));
}
ScriptToCompositorMsg::GetClientWindow(send) => {
compositor_proxy.send(Msg::GetClientWindow(send));
}
ScriptToCompositorMsg::MoveTo(point) => {
compositor_proxy.send(Msg::MoveTo(point));
}
ScriptToCompositorMsg::ResizeTo(size) => {
compositor_proxy.send(Msg::ResizeTo(size));
}
ScriptToCompositorMsg::Exit => {
let (chan, port) = ipc::channel().unwrap();
compositor_proxy.send(Msg::Exit(chan));
port.recv().unwrap();
}
ScriptToCompositorMsg::SetTitle(pipeline_id, title) => {
compositor_proxy.send(Msg::ChangePageTitle(pipeline_id, title))
}
ScriptToCompositorMsg::SendKeyEvent(key, key_state, key_modifiers) => {
compositor_proxy.send(Msg::KeyEvent(key, key_state, key_modifiers))
}
ScriptToCompositorMsg::TouchEventProcessed(result) => {
compositor_proxy.send(Msg::TouchEventProcessed(result))
}
}
}
}
/// Implementation of the abstract `PaintListener` interface.
impl PaintListener for Box<CompositorProxy +'static + Send> {
fn native_display(&mut self) -> Option<NativeDisplay>
|
fn assign_painted_buffers(&mut self,
pipeline_id: PipelineId,
epoch: Epoch,
replies: Vec<(LayerId, Box<LayerBufferSet>)>,
frame_tree_id: FrameTreeId) {
self.send(Msg::AssignPaintedBuffers(pipeline_id, epoch, replies, frame_tree_id));
}
fn ignore_buffer_requests(&mut self, buffer_requests: Vec<BufferRequest>) {
let mut native_surfaces = Vec::new();
for request in buffer_requests.into_iter() {
if let Some(native_surface) = request.native_surface {
native_surfaces.push(native_surface);
}
}
        if !native_surfaces.is_empty() {
self.send(Msg::ReturnUnusedNativeSurfaces(native_surfaces));
}
}
fn initialize_layers_for_pipeline(&mut self,
pipeline_id: PipelineId,
properties: Vec<LayerProperties>,
epoch: Epoch) {
// FIXME(#2004, pcwalton): This assumes that the first layer determines the page size, and
// that all other layers are immediate children of it. This is sufficient to handle
// `position: fixed` but will not be sufficient to handle `overflow: scroll` or transforms.
self.send(Msg::InitializeLayersForPipeline(pipeline_id, epoch, properties));
}
fn notify_paint_task_exiting(&mut self, pipeline_id: PipelineId) {
self.send(Msg::PaintTaskExited(pipeline_id))
}
}
/// Messages from the painting task and the constellation task to the compositor task.
pub enum Msg {
/// Requests that the compositor shut down.
Exit(IpcSender<()>),
/// Informs the compositor that the constellation has completed shutdown.
/// Required because the constellation can have pending calls to make
/// (e.g. SetFrameTree) at the time that we send it an ExitMsg.
ShutdownComplete,
/// Requests the compositor's graphics metadata. Graphics metadata is what the painter needs
/// to create surfaces that the compositor can see. On Linux this is the X display; on Mac this
/// is the pixel format.
///
/// The headless compositor returns `None`.
GetNativeDisplay(Sender<Option<NativeDisplay>>),
/// Tells the compositor to create or update the layers for a pipeline if necessary
/// (i.e. if no layer with that ID exists).
InitializeLayersForPipeline(PipelineId, Epoch, Vec<LayerProperties>),
/// Scroll a page in a window
ScrollFragmentPoint(PipelineId, LayerId, Point2D<f32>, bool),
/// Requests that the compositor assign the painted buffers to the given layers.
AssignPaintedBuffers(PipelineId, Epoch, Vec<(LayerId, Box<LayerBufferSet>)>, FrameTreeId),
/// Alerts the compositor that the current page has changed its title.
ChangePageTitle(PipelineId, Option<String>),
/// Alerts the compositor that the current page has changed its URL.
ChangePageUrl(PipelineId, Url),
/// Alerts the compositor that the given pipeline has changed whether it is running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Replaces the current frame tree, typically called during main frame navigation.
SetFrameTree(SendableFrameTree, IpcSender<()>, Sender<ConstellationMsg>),
/// The load of a page has begun: (can go back, can go forward).
LoadStart(bool, bool),
/// The load of a page has completed: (can go back, can go forward).
LoadComplete(bool, bool),
/// Indicates that the scrolling timeout with the given starting timestamp has happened and a
/// composite should happen. (See the `scrolling` module.)
ScrollTimeout(u64),
RecompositeAfterScroll,
/// Sends an unconsumed key event back to the compositor.
KeyEvent(Key, KeyState, KeyModifiers),
/// Script has handled a touch event, and either prevented or allowed default actions.
TouchEventProcessed(EventResult),
/// Changes the cursor.
SetCursor(Cursor),
/// Composite to a PNG file and return the Image over a passed channel.
CreatePng(IpcSender<Option<Image>>),
/// Informs the compositor that the paint task for the given pipeline has exited.
PaintTaskExited(PipelineId),
/// Alerts the compositor that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
/// A reply to the compositor asking if the output image is stable.
IsReadyToSaveImageReply(bool),
/// A favicon was detected
NewFavicon(Url),
/// <head> tag finished parsing
HeadParsed,
/// Signal that the paint task ignored the paint requests that carried
/// these native surfaces, so that they can be re-added to the surface cache.
ReturnUnusedNativeSurfaces(Vec<NativeSurface>),
/// Collect memory reports and send them back to the given mem::ReportsChan.
CollectMemoryReports(mem::ReportsChan),
/// A status message to be displayed by the browser chrome.
Status(Option<String>),
    /// Get the window's size and position
GetClientWindow(IpcSender<(Size2D<u32>, Point2D<i32>)>),
/// Move the window to a point
MoveTo(Point2D<i32>),
    /// Resize the window to the given size
ResizeTo(Size2D<u32>),
/// A pipeline was shut down.
PipelineExited(PipelineId),
}
impl Debug for Msg {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match *self {
Msg::Exit(..) => write!(f, "Exit"),
Msg::ShutdownComplete => write!(f, "ShutdownComplete"),
Msg::GetNativeDisplay(..) => write!(f, "GetNativeDisplay"),
Msg::InitializeLayersForPipeline(..) => write!(f, "InitializeLayersForPipeline"),
Msg::ScrollFragmentPoint(..) => write!(f, "ScrollFragmentPoint"),
Msg::AssignPaintedBuffers(..) => write!(f, "AssignPaintedBuffers"),
Msg::ChangeRunningAnimationsState(..) => write!(f, "ChangeRunningAnimationsState"),
Msg::ChangePageTitle(..) => write!(f, "ChangePageTitle"),
Msg::ChangePageUrl(..) => write!(f, "ChangePageUrl"),
Msg::SetFrameTree(..) => write!(f, "SetFrameTree"),
Msg::LoadComplete(..) => write!(f, "LoadComplete"),
Msg::LoadStart(..) => write!(f, "LoadStart"),
Msg::ScrollTimeout(..) => write!(f, "ScrollTimeout"),
Msg::RecompositeAfterScroll => write!(f, "RecompositeAfterScroll"),
Msg::KeyEvent(..) => write!(f, "KeyEvent"),
Msg::TouchEventProcessed(..) => write!(f, "TouchEventProcessed"),
Msg::SetCursor(..) => write!(f, "SetCursor"),
Msg::CreatePng(..) => write!(f, "CreatePng"),
Msg::PaintTaskExited(..) => write!(f, "PaintTaskExited"),
Msg::ViewportConstrained(..) => write!(f, "ViewportConstrained"),
Msg::IsReadyToSaveImageReply(..) => write!(f, "IsReadyToSaveImageReply"),
Msg::NewFavicon(..) => write!(f, "NewFavicon"),
Msg::HeadParsed => write!(f, "HeadParsed"),
Msg::ReturnUnusedNativeSurfaces(..) => write!(f, "ReturnUnusedNativeSurfaces"),
Msg::CollectMemoryReports(..) => write!(f, "CollectMemoryReports"),
Msg::Status(..) => write!(f, "Status"),
Msg::GetClientWindow(..) => write!(f, "GetClientWindow"),
Msg::MoveTo(..) => write!(f, "MoveTo"),
Msg::ResizeTo(..) => write!(f, "ResizeTo"),
Msg::PipelineExited(..) => write!(f, "PipelineExited"),
}
}
}
pub struct CompositorTask;
impl CompositorTask {
pub fn create<Window>(window: Option<Rc<Window>>,
state: InitialCompositorState)
-> Box<CompositorEventListener +'static>
where Window: WindowMethods +'static {
match window {
Some(window) => {
box compositor::IOCompositor::create(window, state)
as Box<CompositorEventListener>
}
None => {
box headless::NullCompositor::create(state)
as Box<CompositorEventListener>
}
}
}
}
pub trait CompositorEventListener {
fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool;
fn repaint_synchronously(&mut self);
fn pinch_zoom_level(&self) -> f32;
/// Requests that the compositor send the title for the main frame as soon as possible.
fn title_for_main_frame(&self);
}
/// Data used to construct a compositor.
pub struct InitialCompositorState {
/// A channel to the compositor.
pub sender: Box<CompositorProxy + Send>,
/// A port on which messages inbound to the compositor can be received.
pub receiver: Box<CompositorReceiver>,
/// A channel to the constellation.
pub constellation_chan: Sender<ConstellationMsg>,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
}
|
{
let (chan, port) = channel();
self.send(Msg::GetNativeDisplay(chan));
// If the compositor is shutting down when a paint task
// is being created, the compositor won't respond to
// this message, resulting in an eventual panic. Instead,
// just return None in this case, since the paint task
// will exit shortly and never actually be requested
// to paint buffers by the compositor.
port.recv().unwrap_or(None)
}
|
identifier_body
|
sysinfo.rs
|
use libc::{self, SI_LOAD_SHIFT};
use std::{cmp, mem};
use std::time::Duration;
use Result;
use errno::Errno;
/// System info structure returned by `sysinfo`.
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)] // libc::sysinfo doesn't impl Debug
pub struct SysInfo(libc::sysinfo);
impl SysInfo {
/// Returns the load average tuple.
///
/// The returned values represent the load average over time intervals of
/// 1, 5, and 15 minutes, respectively.
pub fn load_average(&self) -> (f64, f64, f64) {
(
self.0.loads[0] as f64 / (1 << SI_LOAD_SHIFT) as f64,
self.0.loads[1] as f64 / (1 << SI_LOAD_SHIFT) as f64,
self.0.loads[2] as f64 / (1 << SI_LOAD_SHIFT) as f64,
)
}
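    // Scaling example: on Linux, libc defines SI_LOAD_SHIFT = 16, so a raw
    // load value of 65_536 (1 << 16) decodes to a load average of 1.0.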
/// Returns the time since system boot.
pub fn uptime(&self) -> Duration {
|
/// Current number of processes.
pub fn process_count(&self) -> u16 {
self.0.procs
}
/// Returns the amount of swap memory in Bytes.
pub fn swap_total(&self) -> u64 {
self.scale_mem(self.0.totalswap)
}
/// Returns the amount of unused swap memory in Bytes.
pub fn swap_free(&self) -> u64 {
self.scale_mem(self.0.freeswap)
}
/// Returns the total amount of installed RAM in Bytes.
pub fn ram_total(&self) -> u64 {
self.scale_mem(self.0.totalram)
}
/// Returns the amount of completely unused RAM in Bytes.
///
/// "Unused" in this context means that the RAM in neither actively used by
/// programs, nor by the operating system as disk cache or buffer. It is
/// "wasted" RAM since it currently serves no purpose.
pub fn ram_unused(&self) -> u64 {
self.scale_mem(self.0.freeram)
}
fn scale_mem(&self, units: libc::c_ulong) -> u64 {
units as u64 * self.0.mem_unit as u64
}
}
/// Returns system information.
///
/// [See `sysinfo(2)`](http://man7.org/linux/man-pages/man2/sysinfo.2.html).
pub fn sysinfo() -> Result<SysInfo> {
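    // Note: `mem::uninitialized` is deprecated in modern Rust; newer code
    // would use `std::mem::MaybeUninit` and call `assume_init` only after
    // `libc::sysinfo` has filled the struct.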
let mut info: libc::sysinfo = unsafe { mem::uninitialized() };
let res = unsafe { libc::sysinfo(&mut info) };
Errno::result(res).map(|_| SysInfo(info))
}
|
// Truncate negative values to 0
Duration::from_secs(cmp::max(self.0.uptime, 0) as u64)
}
|
random_line_split
|
sysinfo.rs
|
use libc::{self, SI_LOAD_SHIFT};
use std::{cmp, mem};
use std::time::Duration;
use Result;
use errno::Errno;
/// System info structure returned by `sysinfo`.
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)] // libc::sysinfo doesn't impl Debug
pub struct SysInfo(libc::sysinfo);
impl SysInfo {
/// Returns the load average tuple.
///
/// The returned values represent the load average over time intervals of
/// 1, 5, and 15 minutes, respectively.
pub fn load_average(&self) -> (f64, f64, f64) {
(
self.0.loads[0] as f64 / (1 << SI_LOAD_SHIFT) as f64,
self.0.loads[1] as f64 / (1 << SI_LOAD_SHIFT) as f64,
self.0.loads[2] as f64 / (1 << SI_LOAD_SHIFT) as f64,
)
}
/// Returns the time since system boot.
pub fn uptime(&self) -> Duration {
// Truncate negative values to 0
Duration::from_secs(cmp::max(self.0.uptime, 0) as u64)
}
/// Current number of processes.
pub fn process_count(&self) -> u16 {
self.0.procs
}
/// Returns the amount of swap memory in Bytes.
pub fn swap_total(&self) -> u64 {
self.scale_mem(self.0.totalswap)
}
/// Returns the amount of unused swap memory in Bytes.
pub fn swap_free(&self) -> u64 {
self.scale_mem(self.0.freeswap)
}
/// Returns the total amount of installed RAM in Bytes.
pub fn
|
(&self) -> u64 {
self.scale_mem(self.0.totalram)
}
/// Returns the amount of completely unused RAM in Bytes.
///
/// "Unused" in this context means that the RAM in neither actively used by
/// programs, nor by the operating system as disk cache or buffer. It is
/// "wasted" RAM since it currently serves no purpose.
pub fn ram_unused(&self) -> u64 {
self.scale_mem(self.0.freeram)
}
fn scale_mem(&self, units: libc::c_ulong) -> u64 {
units as u64 * self.0.mem_unit as u64
}
}
/// Returns system information.
///
/// [See `sysinfo(2)`](http://man7.org/linux/man-pages/man2/sysinfo.2.html).
pub fn sysinfo() -> Result<SysInfo> {
let mut info: libc::sysinfo = unsafe { mem::uninitialized() };
let res = unsafe { libc::sysinfo(&mut info) };
Errno::result(res).map(|_| SysInfo(info))
}
|
ram_total
|
identifier_name
|
edge_outside.rs
|
use crate::mock_graph::{
arbitrary::{GuidedArbGraph, Limit, VertexOutside},
MockVertex, TestGraph,
};
use graphene::{
core::{Ensure, Graph},
impl_ensurer,
};
use quickcheck::{Arbitrary, Gen};
use std::collections::HashSet;
/// An arbitrary graph and two vertices where at least one is not in the graph.
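/// Shrinking preserves this property: each shrunk candidate is filtered so
/// that at least one of the two vertices remains outside the graph.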
#[derive(Clone, Debug)]
pub struct
|
<G>(pub G, pub MockVertex, pub MockVertex)
where
G: GuidedArbGraph,
G::Graph: TestGraph;
impl<G> Ensure for EdgeOutside<G>
where
G: GuidedArbGraph,
G::Graph: TestGraph,
{
fn ensure_unvalidated(_c: Self::Ensured, _: ()) -> Self
{
unimplemented!()
}
fn validate(_c: &Self::Ensured, _: &()) -> bool
{
unimplemented!()
}
}
impl_ensurer! {
use<G> EdgeOutside<G>: Ensure
as (self.0): G
where
G: GuidedArbGraph,
G::Graph: TestGraph
}
impl<Gr> GuidedArbGraph for EdgeOutside<Gr>
where
Gr: GuidedArbGraph,
Gr::Graph: TestGraph,
{
fn choose_size<G: Gen>(
g: &mut G,
v_min: usize,
v_max: usize,
e_min: usize,
e_max: usize,
) -> (usize, usize)
{
Gr::choose_size(g, v_min, v_max, e_min, e_max)
}
fn arbitrary_fixed<G: Gen>(g: &mut G, v_count: usize, e_count: usize) -> Self
{
let single_invalid = VertexOutside::arbitrary_fixed(g, v_count, e_count);
Self(single_invalid.0, single_invalid.1, MockVertex::arbitrary(g))
}
fn shrink_guided(&self, limits: HashSet<Limit>) -> Box<dyn Iterator<Item = Self>>
{
let mut result = Vec::new();
// Shrink the graph, keeping only the shrunk graphs where the edge is still
// invalid.
result.extend(
self.0
.shrink_guided(limits.clone())
.filter(|g| {
                !g.graph().contains_vertex(self.1) || !g.graph().contains_vertex(self.2)
})
.map(|g| Self(g, self.1, self.2)),
);
// We then shrink the vertices, ensuring that at least one of them stays invalid
result.extend(
self.1
.shrink()
.filter(|v| {
                !self.0.graph().contains_vertex(v) || !self.0.graph().contains_vertex(self.2)
})
.map(|v| Self(self.0.clone(), v, self.2)),
);
result.extend(
self.2
.shrink()
.filter(|v| {
                !self.0.graph().contains_vertex(self.1) || !self.0.graph().contains_vertex(v)
})
.map(|v| Self(self.0.clone(), self.1, v)),
);
Box::new(result.into_iter())
}
}
|
EdgeOutside
|
identifier_name
|
edge_outside.rs
|
use crate::mock_graph::{
arbitrary::{GuidedArbGraph, Limit, VertexOutside},
MockVertex, TestGraph,
};
use graphene::{
core::{Ensure, Graph},
impl_ensurer,
};
use quickcheck::{Arbitrary, Gen};
use std::collections::HashSet;
/// An arbitrary graph and two vertices where at least one is not in the graph.
#[derive(Clone, Debug)]
pub struct EdgeOutside<G>(pub G, pub MockVertex, pub MockVertex)
where
G: GuidedArbGraph,
G::Graph: TestGraph;
|
where
G: GuidedArbGraph,
G::Graph: TestGraph,
{
fn ensure_unvalidated(_c: Self::Ensured, _: ()) -> Self
{
unimplemented!()
}
fn validate(_c: &Self::Ensured, _: &()) -> bool
{
unimplemented!()
}
}
impl_ensurer! {
use<G> EdgeOutside<G>: Ensure
as (self.0): G
where
G: GuidedArbGraph,
G::Graph: TestGraph
}
impl<Gr> GuidedArbGraph for EdgeOutside<Gr>
where
Gr: GuidedArbGraph,
Gr::Graph: TestGraph,
{
fn choose_size<G: Gen>(
g: &mut G,
v_min: usize,
v_max: usize,
e_min: usize,
e_max: usize,
) -> (usize, usize)
{
Gr::choose_size(g, v_min, v_max, e_min, e_max)
}
fn arbitrary_fixed<G: Gen>(g: &mut G, v_count: usize, e_count: usize) -> Self
{
let single_invalid = VertexOutside::arbitrary_fixed(g, v_count, e_count);
Self(single_invalid.0, single_invalid.1, MockVertex::arbitrary(g))
}
fn shrink_guided(&self, limits: HashSet<Limit>) -> Box<dyn Iterator<Item = Self>>
{
let mut result = Vec::new();
// Shrink the graph, keeping only the shrunk graphs where the edge is still
// invalid.
result.extend(
self.0
.shrink_guided(limits.clone())
.filter(|g| {
                !g.graph().contains_vertex(self.1) || !g.graph().contains_vertex(self.2)
})
.map(|g| Self(g, self.1, self.2)),
);
// We then shrink the vertices, ensuring that at least one of them stays invalid
result.extend(
self.1
.shrink()
.filter(|v| {
                !self.0.graph().contains_vertex(v) || !self.0.graph().contains_vertex(self.2)
})
.map(|v| Self(self.0.clone(), v, self.2)),
);
result.extend(
self.2
.shrink()
.filter(|v| {
                !self.0.graph().contains_vertex(self.1) || !self.0.graph().contains_vertex(v)
})
.map(|v| Self(self.0.clone(), self.1, v)),
);
Box::new(result.into_iter())
}
}
|
impl<G> Ensure for EdgeOutside<G>
|
random_line_split
|
edge_outside.rs
|
use crate::mock_graph::{
arbitrary::{GuidedArbGraph, Limit, VertexOutside},
MockVertex, TestGraph,
};
use graphene::{
core::{Ensure, Graph},
impl_ensurer,
};
use quickcheck::{Arbitrary, Gen};
use std::collections::HashSet;
/// An arbitrary graph and two vertices where at least one is not in the graph.
#[derive(Clone, Debug)]
pub struct EdgeOutside<G>(pub G, pub MockVertex, pub MockVertex)
where
G: GuidedArbGraph,
G::Graph: TestGraph;
impl<G> Ensure for EdgeOutside<G>
where
G: GuidedArbGraph,
G::Graph: TestGraph,
{
fn ensure_unvalidated(_c: Self::Ensured, _: ()) -> Self
{
unimplemented!()
}
fn validate(_c: &Self::Ensured, _: &()) -> bool
{
unimplemented!()
}
}
impl_ensurer! {
use<G> EdgeOutside<G>: Ensure
as (self.0): G
where
G: GuidedArbGraph,
G::Graph: TestGraph
}
impl<Gr> GuidedArbGraph for EdgeOutside<Gr>
where
Gr: GuidedArbGraph,
Gr::Graph: TestGraph,
{
fn choose_size<G: Gen>(
g: &mut G,
v_min: usize,
v_max: usize,
e_min: usize,
e_max: usize,
) -> (usize, usize)
{
Gr::choose_size(g, v_min, v_max, e_min, e_max)
}
fn arbitrary_fixed<G: Gen>(g: &mut G, v_count: usize, e_count: usize) -> Self
{
let single_invalid = VertexOutside::arbitrary_fixed(g, v_count, e_count);
Self(single_invalid.0, single_invalid.1, MockVertex::arbitrary(g))
}
fn shrink_guided(&self, limits: HashSet<Limit>) -> Box<dyn Iterator<Item = Self>>
|
.map(|v| Self(self.0.clone(), v, self.2)),
);
result.extend(
self.2
.shrink()
.filter(|v| {
                !self.0.graph().contains_vertex(self.1) || !self.0.graph().contains_vertex(v)
})
.map(|v| Self(self.0.clone(), self.1, v)),
);
Box::new(result.into_iter())
}
}
|
{
let mut result = Vec::new();
// Shrink the graph, keeping only the shrunk graphs where the edge is still
// invalid.
result.extend(
self.0
.shrink_guided(limits.clone())
.filter(|g| {
!g.graph().contains_vertex(self.1) || !g.graph().contains_vertex(self.2)
})
.map(|g| Self(g, self.1, self.2)),
);
// We then shrink the vertices, ensuring that at least one of them stays invalid
result.extend(
self.1
.shrink()
.filter(|v| {
!self.0.graph().contains_vertex(v) || !self.0.graph().contains_vertex(self.2)
})
|
identifier_body
|
wrapping_square.rs
|
use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
#[test]
fn test_wrapping_square() {
fn test<T: PrimitiveInt>(x: T, out: T) {
assert_eq!(x.wrapping_square(), out);
let mut x = x;
x.wrapping_square_assign();
assert_eq!(x, out);
}
test::<u8>(0, 0);
test::<i16>(1, 1);
test::<u32>(2, 4);
test::<i64>(3, 9);
test::<u128>(10, 100);
test::<isize>(123, 15129);
test::<u32>(1000, 1000000);
test::<i16>(-1, 1);
test::<i32>(-2, 4);
test::<i64>(-3, 9);
test::<i128>(-10, 100);
test::<isize>(-123, 15129);
test::<i32>(-1000, 1000000);
test::<u16>(1000, 16960);
test::<i16>(-1000, 16960);
}
fn wrapping_square_properties_helper_unsigned<T: PrimitiveUnsigned>() {
unsigned_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
});
}
fn wrapping_square_properties_helper_signed<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
        if x != T::MIN {
assert_eq!((-x).wrapping_square(), square);
}
});
}
#[test]
fn saturating_square_properties()
|
{
apply_fn_to_unsigneds!(wrapping_square_properties_helper_unsigned);
apply_fn_to_signeds!(wrapping_square_properties_helper_signed);
}
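// Worked example (sketch, not part of the test file): the u16 case above
// is just squaring modulo 2^16: 1000 * 1000 = 1_000_000, and
// 1_000_000 mod 65_536 = 16_960, which is why test::<u16>(1000, 16960)
// holds. Plain std wrapping_mul reproduces it:
fn _wrapping_square_by_hand() {
    let x: u16 = 1000;
    let full = (x as u32) * (x as u32); // 1_000_000 -- overflows u16
    assert_eq!((full % 65_536) as u16, 16_960);
    assert_eq!(x.wrapping_mul(x), 16_960);
}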
|
identifier_body
|
|
wrapping_square.rs
|
use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
#[test]
fn test_wrapping_square() {
fn test<T: PrimitiveInt>(x: T, out: T) {
assert_eq!(x.wrapping_square(), out);
let mut x = x;
x.wrapping_square_assign();
assert_eq!(x, out);
}
test::<u8>(0, 0);
test::<i16>(1, 1);
test::<u32>(2, 4);
test::<i64>(3, 9);
test::<u128>(10, 100);
test::<isize>(123, 15129);
test::<u32>(1000, 1000000);
test::<i16>(-1, 1);
test::<i32>(-2, 4);
test::<i64>(-3, 9);
test::<i128>(-10, 100);
test::<isize>(-123, 15129);
test::<i32>(-1000, 1000000);
test::<u16>(1000, 16960);
test::<i16>(-1000, 16960);
}
fn
|
<T: PrimitiveUnsigned>() {
unsigned_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
});
}
fn wrapping_square_properties_helper_signed<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
        if x != T::MIN {
assert_eq!((-x).wrapping_square(), square);
}
});
}
#[test]
fn saturating_square_properties() {
apply_fn_to_unsigneds!(wrapping_square_properties_helper_unsigned);
apply_fn_to_signeds!(wrapping_square_properties_helper_signed);
}
|
wrapping_square_properties_helper_unsigned
|
identifier_name
|
wrapping_square.rs
|
use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
#[test]
fn test_wrapping_square() {
fn test<T: PrimitiveInt>(x: T, out: T) {
assert_eq!(x.wrapping_square(), out);
let mut x = x;
x.wrapping_square_assign();
assert_eq!(x, out);
}
test::<u8>(0, 0);
test::<i16>(1, 1);
test::<u32>(2, 4);
test::<i64>(3, 9);
test::<u128>(10, 100);
test::<isize>(123, 15129);
test::<u32>(1000, 1000000);
test::<i16>(-1, 1);
test::<i32>(-2, 4);
test::<i64>(-3, 9);
test::<i128>(-10, 100);
test::<isize>(-123, 15129);
test::<i32>(-1000, 1000000);
test::<u16>(1000, 16960);
test::<i16>(-1000, 16960);
}
fn wrapping_square_properties_helper_unsigned<T: PrimitiveUnsigned>() {
unsigned_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
});
}
fn wrapping_square_properties_helper_signed<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
        if x != T::MIN
|
});
}
#[test]
fn saturating_square_properties() {
apply_fn_to_unsigneds!(wrapping_square_properties_helper_unsigned);
apply_fn_to_signeds!(wrapping_square_properties_helper_signed);
}
|
{
assert_eq!((-x).wrapping_square(), square);
}
|
conditional_block
|
wrapping_square.rs
|
use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base_test_util::generators::{signed_gen, unsigned_gen};
#[test]
fn test_wrapping_square() {
fn test<T: PrimitiveInt>(x: T, out: T) {
assert_eq!(x.wrapping_square(), out);
let mut x = x;
x.wrapping_square_assign();
assert_eq!(x, out);
}
test::<u8>(0, 0);
test::<i16>(1, 1);
test::<u32>(2, 4);
test::<i64>(3, 9);
test::<u128>(10, 100);
test::<isize>(123, 15129);
test::<u32>(1000, 1000000);
test::<i16>(-1, 1);
test::<i32>(-2, 4);
test::<i64>(-3, 9);
test::<i128>(-10, 100);
test::<isize>(-123, 15129);
test::<i32>(-1000, 1000000);
test::<u16>(1000, 16960);
test::<i16>(-1000, 16960);
}
|
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
});
}
fn wrapping_square_properties_helper_signed<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
assert_eq!(square, x.wrapping_square());
assert_eq!(square, x.wrapping_pow(2));
        if x != T::MIN {
assert_eq!((-x).wrapping_square(), square);
}
});
}
#[test]
fn saturating_square_properties() {
apply_fn_to_unsigneds!(wrapping_square_properties_helper_unsigned);
apply_fn_to_signeds!(wrapping_square_properties_helper_signed);
}
|
fn wrapping_square_properties_helper_unsigned<T: PrimitiveUnsigned>() {
unsigned_gen::<T>().test_properties(|x| {
let mut square = x;
square.wrapping_square_assign();
|
random_line_split
|
eina_iterator.rs
|
#![allow(dead_code)]
use eina_ffi::*;
use libc::*;
use std::ptr;
pub struct EinaIterator {
pub ptr: *mut Eina_Iterator,
}
impl EinaIterator {
/// Return the container of an iterator
pub fn get_container(&mut self) -> Option<&mut c_void> {
unsafe {
let container = eina_iterator_container_get(self.ptr);
match container.is_null() {
                false => Some(&mut *container),
true => None,
}
}
}
/// Return the value of the current element and go to the next one
pub fn next(&mut self) -> Option<&mut c_void> {
unsafe {
let mut data: *mut c_void = ptr::null_mut();
match eina_iterator_next(self.ptr, &mut data as *mut *mut _ as *mut *mut c_void) {
EINA_TRUE => Some(&mut *data),
_ => None,
}
}
}
/// Iterate over the container and execute a callback on each element
pub fn foreach<T>(&mut self, callback: EinaEachCb, fdata: &T) {
unsafe {
eina_iterator_foreach(self.ptr, callback, fdata as *const _ as *const c_void);
}
}
/// Lock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn lock(&mut self) -> bool
|
/// Unlock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn unlock(&mut self) -> bool {
unsafe {
match eina_iterator_unlock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
}
impl Drop for EinaIterator {
fn drop(&mut self) {
unsafe {
eina_iterator_free(self.ptr)
}
}
}
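// Adapter sketch (not in the original file): `next` already yields an
// Option, so a thin std::iter::Iterator wrapper makes the FFI iterator
// usable in for-loops; elements stay raw c_void pointers, as in the
// wrapper itself.
struct EinaIterAdapter(EinaIterator);
impl Iterator for EinaIterAdapter {
    type Item = *mut c_void;
    fn next(&mut self) -> Option<*mut c_void> {
        self.0.next().map(|v| v as *mut c_void)
    }
}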
|
{
unsafe {
match eina_iterator_lock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
|
identifier_body
|
eina_iterator.rs
|
#![allow(dead_code)]
use eina_ffi::*;
use libc::*;
use std::ptr;
pub struct EinaIterator {
pub ptr: *mut Eina_Iterator,
}
impl EinaIterator {
/// Return the container of an iterator
pub fn get_container(&mut self) -> Option<&mut c_void> {
unsafe {
let container = eina_iterator_container_get(self.ptr);
match container.is_null() {
                false => Some(&mut *container),
true => None,
}
}
}
/// Return the value of the current element and go to the next one
pub fn next(&mut self) -> Option<&mut c_void> {
unsafe {
let mut data: *mut c_void = ptr::null_mut();
match eina_iterator_next(self.ptr, &mut data as *mut *mut _ as *mut *mut c_void) {
EINA_TRUE => Some(&mut *data),
_ => None,
}
}
}
/// Iterate over the container and execute a callback on each element
pub fn foreach<T>(&mut self, callback: EinaEachCb, fdata: &T) {
unsafe {
eina_iterator_foreach(self.ptr, callback, fdata as *const _ as *const c_void);
}
}
/// Lock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn lock(&mut self) -> bool {
unsafe {
match eina_iterator_lock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
/// Unlock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn unlock(&mut self) -> bool {
unsafe {
match eina_iterator_unlock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
}
impl Drop for EinaIterator {
fn drop(&mut self) {
unsafe {
eina_iterator_free(self.ptr)
}
|
}
}
|
random_line_split
|
|
eina_iterator.rs
|
#![allow(dead_code)]
use eina_ffi::*;
use libc::*;
use std::ptr;
pub struct EinaIterator {
pub ptr: *mut Eina_Iterator,
}
impl EinaIterator {
/// Return the container of an iterator
pub fn get_container(&mut self) -> Option<&mut c_void> {
unsafe {
let container = eina_iterator_container_get(self.ptr);
match container.is_null() {
                false => Some(&mut *container),
true => None,
}
}
}
/// Return the value of the current element and go to the next one
pub fn next(&mut self) -> Option<&mut c_void> {
unsafe {
let mut data: *mut c_void = ptr::null_mut();
match eina_iterator_next(self.ptr, &mut data as *mut *mut _ as *mut *mut c_void) {
EINA_TRUE => Some(&mut *data),
_ => None,
}
}
}
/// Iterate over the container and execute a callback on each element
pub fn foreach<T>(&mut self, callback: EinaEachCb, fdata: &T) {
unsafe {
eina_iterator_foreach(self.ptr, callback, fdata as *const _ as *const c_void);
}
}
/// Lock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn lock(&mut self) -> bool {
unsafe {
match eina_iterator_lock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
/// Unlock the container of the iterator
///
/// Warning: None of the existing eina data structures are lockable
pub fn unlock(&mut self) -> bool {
unsafe {
match eina_iterator_unlock(self.ptr) {
EINA_TRUE => true,
_ => false,
}
}
}
}
impl Drop for EinaIterator {
fn
|
(&mut self) {
unsafe {
eina_iterator_free(self.ptr)
}
}
}
|
drop
|
identifier_name
|
htype.rs
|
use super::{Result, Error};
#[derive(Debug, PartialEq)]
#[allow(non_camel_case_types)]
pub enum
|
{
Ethernet_10mb = 1,
Experimental_Ethernet_3mb,
Amateur_Radio_AX_25,
Proteon_ProNET_Token_Ring,
Chaos,
IEEE_802_Networks,
Arcnet,
Hyperchannel,
Lanstar,
Autonet_Short_Address,
LocalTalk,
LocalNet,
Ultra_link,
SMDS,
Frame_Relay,
Asynchronous_Transmission_Mode,
}
impl Htype {
pub fn from_byte(byte: u8) -> Result<Htype> {
match byte {
1u8 => Ok(Htype::Ethernet_10mb),
2u8 => Ok(Htype::Experimental_Ethernet_3mb),
3u8 => Ok(Htype::Amateur_Radio_AX_25),
4u8 => Ok(Htype::Proteon_ProNET_Token_Ring),
5u8 => Ok(Htype::Chaos),
6u8 => Ok(Htype::IEEE_802_Networks),
7u8 => Ok(Htype::Arcnet),
8u8 => Ok(Htype::Hyperchannel),
9u8 => Ok(Htype::Lanstar),
10u8 => Ok(Htype::Autonet_Short_Address),
11u8 => Ok(Htype::LocalTalk),
12u8 => Ok(Htype::LocalNet),
13u8 => Ok(Htype::Ultra_link),
14u8 => Ok(Htype::SMDS),
15u8 => Ok(Htype::Frame_Relay),
16u8 => Ok(Htype::Asynchronous_Transmission_Mode),
_ => Err(Error::ParseError(format!("Unknown Htype {:?}", byte)))
}
}
}
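// Usage sketch (not in the original file): the codes appear to follow the
// IANA ARP hardware-type assignments (Ethernet 10Mb = 1, IEEE 802 = 6, ...),
// and anything outside 1..=16 becomes a ParseError.
fn _htype_examples() {
    match Htype::from_byte(1) {
        Ok(Htype::Ethernet_10mb) => {}
        _ => panic!("code 1 should be Ethernet_10mb"),
    }
    assert!(Htype::from_byte(0).is_err());
    assert!(Htype::from_byte(42).is_err());
}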
|
Htype
|
identifier_name
|
htype.rs
|
use super::{Result, Error};
#[derive(Debug, PartialEq)]
#[allow(non_camel_case_types)]
pub enum Htype {
Ethernet_10mb = 1,
Experimental_Ethernet_3mb,
Amateur_Radio_AX_25,
Proteon_ProNET_Token_Ring,
Chaos,
IEEE_802_Networks,
Arcnet,
Hyperchannel,
Lanstar,
Autonet_Short_Address,
LocalTalk,
LocalNet,
Ultra_link,
SMDS,
Frame_Relay,
Asynchronous_Transmission_Mode,
}
impl Htype {
pub fn from_byte(byte: u8) -> Result<Htype> {
match byte {
1u8 => Ok(Htype::Ethernet_10mb),
2u8 => Ok(Htype::Experimental_Ethernet_3mb),
3u8 => Ok(Htype::Amateur_Radio_AX_25),
4u8 => Ok(Htype::Proteon_ProNET_Token_Ring),
5u8 => Ok(Htype::Chaos),
6u8 => Ok(Htype::IEEE_802_Networks),
7u8 => Ok(Htype::Arcnet),
8u8 => Ok(Htype::Hyperchannel),
9u8 => Ok(Htype::Lanstar),
10u8 => Ok(Htype::Autonet_Short_Address),
11u8 => Ok(Htype::LocalTalk),
12u8 => Ok(Htype::LocalNet),
13u8 => Ok(Htype::Ultra_link),
14u8 => Ok(Htype::SMDS),
|
15u8 => Ok(Htype::Frame_Relay),
16u8 => Ok(Htype::Asynchronous_Transmission_Mode),
_ => Err(Error::ParseError(format!("Unknown Htype {:?}", byte)))
}
}
}
|
random_line_split
|
|
thread_comm.rs
|
extern crate alloc;
use core::ptr::{self};
use std::sync::{Arc,RwLock};
//use std::sync::{Barrier};
use std::sync::atomic::{AtomicPtr,AtomicUsize,AtomicBool,Ordering};
pub struct ThreadComm<T> {
n_threads: usize,
//Slot has a MatrixBuffer, to be broadcast
slot: AtomicPtr<T>,
//Slot_reads represents the number of times slot has been read.
//If slot_reads == n_threads, then it is ready to be written to.
//If slot_reads < n_threads, it is ready to be read.
//Each thread is only allowed to read from the slot one time.
//It is incremented every time slot is read,
//And it is an integer modulo n_threads
slot_reads: AtomicUsize,
//barrier: Barrier,
//Stuff for barriers
barrier_sense: AtomicBool,
barrier_threads_arrived: AtomicUsize,
    //I guess sub_comms needs to have interior mutability?
//sub_comms: Vec<AtomicPtr<Arc<ThreadComm<T>>>>,
sub_comms: Vec<RwLock<Option<Arc<ThreadComm<T>>>>>,
}
impl<T> ThreadComm<T> {
pub fn new(n_threads: usize) -> ThreadComm<T> {
let init_ptr: *const T = ptr::null();
let mut sub_comms = Vec::with_capacity(n_threads);
for _ in 0..n_threads {
sub_comms.push(RwLock::new(Option::None));
}
ThreadComm{ n_threads: n_threads,
slot: AtomicPtr::new(init_ptr as *mut T),
slot_reads: AtomicUsize::new(n_threads),
// barrier: Barrier::new(n_threads),
barrier_sense: AtomicBool::new(false),
barrier_threads_arrived: AtomicUsize::new(0),
sub_comms: sub_comms,
}
}
fn barrier(&self, _thread_id: usize) {
if self.n_threads == 1 {
return;
}
let my_sense = self.barrier_sense.load(Ordering::Relaxed);
let my_threads_arrived = self.barrier_threads_arrived.fetch_add(1,Ordering::Relaxed);
if my_threads_arrived == self.n_threads-1 {
self.barrier_threads_arrived.store(0,Ordering::Relaxed);
self.barrier_sense.fetch_xor(true, Ordering::Relaxed);
} else {
while self.barrier_sense.load(Ordering::Relaxed) == my_sense { }
}
//self.barrier.wait();
}
fn broadcast(&self, info: &ThreadInfo<T>, to_send: *mut T) -> *mut T {
if info.thread_id == 0 {
//Spin while waiting for the thread communicator to be ready to broadcast
            while self.slot_reads.load(Ordering::Relaxed) != self.n_threads {
}
self.slot.store(to_send, Ordering::Relaxed);
self.slot_reads.store(0, Ordering::Relaxed);
}
//Spin while waiting for the thread communicator chief to broadcast
while self.slot_reads.load(Ordering::Relaxed) == self.n_threads {
}
self.slot_reads.fetch_add(1, Ordering::Relaxed);
self.slot.load(Ordering::Relaxed)
/*
self.barrier(info.thread_id);
if info.thread_id == 0 {
self.slot.store(to_send, Ordering::Relaxed);
}
self.barrier(info.thread_id);
self.slot.load(Ordering::Relaxed)*/
}
//Pretty sure with this implementation, split can only be called one time.
fn split(&self, thread_id: usize, n_way: usize) -> Arc<ThreadComm<T>> {
assert_eq!(self.n_threads % n_way, 0);
let subcomm_n_threads = self.n_threads / n_way;
let sub_comm_number = thread_id / subcomm_n_threads; // Which subcomm are we going to use?
let sub_comm_id = thread_id % subcomm_n_threads; // What is our id within the subcomm?
self.barrier(thread_id);
if sub_comm_id == 0 {
let mut sub_comm = self.sub_comms[sub_comm_number].write().unwrap();
*sub_comm = Option::Some(Arc::new(ThreadComm::new(subcomm_n_threads)));
}
self.barrier(thread_id);
let comm = self.sub_comms[sub_comm_number].read().unwrap().clone();
comm.unwrap()
}
}
//unsafe impl Sync for ThreadComm {}
//unsafe impl Send for ThreadComm {}
pub struct ThreadInfo<T> {
thread_id: usize,
comm: Arc<ThreadComm<T>>,
}
impl<T> ThreadInfo<T> {
pub fn new( id: usize, comm: Arc<ThreadComm<T>> ) -> ThreadInfo<T> {
ThreadInfo{ thread_id: id, comm: comm }
}
    pub fn single_thread() -> ThreadInfo<T> {
ThreadInfo{ thread_id : 0, comm : Arc::new(ThreadComm::new(1)) }
}
pub fn barrier(&self) {
self.comm.barrier(self.thread_id);
}
pub fn broadcast(&self, to_send: *mut T) -> *mut T {
self.comm.broadcast(self, to_send)
}
pub fn num_threads(&self) -> usize { self.comm.n_threads }
pub fn thread_id(&self) -> usize
|
pub fn split(&self, n_way: usize) -> ThreadInfo<T> {
let subcomm = self.comm.split(self.thread_id, n_way);
let subcomm_id = self.thread_id % (self.comm.n_threads / n_way);
ThreadInfo{ thread_id: subcomm_id, comm: subcomm }
}
}
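// Usage sketch (not in the original file; it assumes ThreadComm<usize> is
// Send + Sync, which the commented-out unsafe impls suggest was once in
// doubt): every thread wraps one shared ThreadComm in a ThreadInfo, thread
// 0 publishes a pointer, and the others spin in `broadcast` until each has
// read the slot exactly once. The trailing barrier keeps thread 0's stack
// value alive until every reader has dereferenced it.
fn _thread_comm_example() {
    use std::thread;
    let n = 4;
    let comm = Arc::new(ThreadComm::<usize>::new(n));
    let handles: Vec<_> = (0..n)
        .map(|id| {
            let info = ThreadInfo::new(id, comm.clone());
            thread::spawn(move || {
                let mut local = id; // only thread 0's value is published
                let shared = info.broadcast(&mut local as *mut usize);
                unsafe { assert_eq!(*shared, 0) };
                info.barrier();
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
}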
|
{ self.thread_id }
|
identifier_body
|
thread_comm.rs
|
extern crate alloc;
use core::ptr::{self};
use std::sync::{Arc,RwLock};
//use std::sync::{Barrier};
use std::sync::atomic::{AtomicPtr,AtomicUsize,AtomicBool,Ordering};
pub struct ThreadComm<T> {
n_threads: usize,
//Slot has a MatrixBuffer, to be broadcast
slot: AtomicPtr<T>,
//Slot_reads represents the number of times slot has been read.
//If slot_reads == n_threads, then it is ready to be written to.
//If slot_reads < n_threads, it is ready to be read.
//Each thread is only allowed to read from the slot one time.
//It is incremented every time slot is read,
//And it is an integer modulo n_threads
slot_reads: AtomicUsize,
//barrier: Barrier,
//Stuff for barriers
barrier_sense: AtomicBool,
barrier_threads_arrived: AtomicUsize,
    //I guess sub_comms needs to have interior mutability?
//sub_comms: Vec<AtomicPtr<Arc<ThreadComm<T>>>>,
sub_comms: Vec<RwLock<Option<Arc<ThreadComm<T>>>>>,
}
impl<T> ThreadComm<T> {
pub fn new(n_threads: usize) -> ThreadComm<T> {
let init_ptr: *const T = ptr::null();
let mut sub_comms = Vec::with_capacity(n_threads);
for _ in 0..n_threads {
sub_comms.push(RwLock::new(Option::None));
}
ThreadComm{ n_threads: n_threads,
slot: AtomicPtr::new(init_ptr as *mut T),
slot_reads: AtomicUsize::new(n_threads),
// barrier: Barrier::new(n_threads),
barrier_sense: AtomicBool::new(false),
barrier_threads_arrived: AtomicUsize::new(0),
sub_comms: sub_comms,
}
}
fn barrier(&self, _thread_id: usize) {
if self.n_threads == 1 {
return;
}
let my_sense = self.barrier_sense.load(Ordering::Relaxed);
let my_threads_arrived = self.barrier_threads_arrived.fetch_add(1,Ordering::Relaxed);
if my_threads_arrived == self.n_threads-1 {
self.barrier_threads_arrived.store(0,Ordering::Relaxed);
self.barrier_sense.fetch_xor(true, Ordering::Relaxed);
} else
|
//self.barrier.wait();
}
fn broadcast(&self, info: &ThreadInfo<T>, to_send: *mut T) -> *mut T {
if info.thread_id == 0 {
//Spin while waiting for the thread communicator to be ready to broadcast
            while self.slot_reads.load(Ordering::Relaxed) != self.n_threads {
}
self.slot.store(to_send, Ordering::Relaxed);
self.slot_reads.store(0, Ordering::Relaxed);
}
//Spin while waiting for the thread communicator chief to broadcast
while self.slot_reads.load(Ordering::Relaxed) == self.n_threads {
}
self.slot_reads.fetch_add(1, Ordering::Relaxed);
self.slot.load(Ordering::Relaxed)
/*
self.barrier(info.thread_id);
if info.thread_id == 0 {
self.slot.store(to_send, Ordering::Relaxed);
}
self.barrier(info.thread_id);
self.slot.load(Ordering::Relaxed)*/
}
//Pretty sure with this implementation, split can only be called one time.
fn split(&self, thread_id: usize, n_way: usize) -> Arc<ThreadComm<T>> {
assert_eq!(self.n_threads % n_way, 0);
let subcomm_n_threads = self.n_threads / n_way;
let sub_comm_number = thread_id / subcomm_n_threads; // Which subcomm are we going to use?
let sub_comm_id = thread_id % subcomm_n_threads; // What is our id within the subcomm?
self.barrier(thread_id);
if sub_comm_id == 0 {
let mut sub_comm = self.sub_comms[sub_comm_number].write().unwrap();
*sub_comm = Option::Some(Arc::new(ThreadComm::new(subcomm_n_threads)));
}
self.barrier(thread_id);
let comm = self.sub_comms[sub_comm_number].read().unwrap().clone();
comm.unwrap()
}
}
//unsafe impl Sync for ThreadComm {}
//unsafe impl Send for ThreadComm {}
pub struct ThreadInfo<T> {
thread_id: usize,
comm: Arc<ThreadComm<T>>,
}
impl<T> ThreadInfo<T> {
pub fn new( id: usize, comm: Arc<ThreadComm<T>> ) -> ThreadInfo<T> {
ThreadInfo{ thread_id: id, comm: comm }
}
    pub fn single_thread() -> ThreadInfo<T> {
ThreadInfo{ thread_id : 0, comm : Arc::new(ThreadComm::new(1)) }
}
pub fn barrier(&self) {
self.comm.barrier(self.thread_id);
}
pub fn broadcast(&self, to_send: *mut T) -> *mut T {
self.comm.broadcast(self, to_send)
}
pub fn num_threads(&self) -> usize { self.comm.n_threads }
pub fn thread_id(&self) -> usize { self.thread_id }
pub fn split(&self, n_way: usize) -> ThreadInfo<T> {
let subcomm = self.comm.split(self.thread_id, n_way);
let subcomm_id = self.thread_id % (self.comm.n_threads / n_way);
ThreadInfo{ thread_id: subcomm_id, comm: subcomm }
}
}
|
{
while self.barrier_sense.load(Ordering::Relaxed) == my_sense { }
}
|
conditional_block
|
thread_comm.rs
|
extern crate alloc;
use core::ptr::{self};
use std::sync::{Arc,RwLock};
//use std::sync::{Barrier};
use std::sync::atomic::{AtomicPtr,AtomicUsize,AtomicBool,Ordering};
pub struct ThreadComm<T> {
n_threads: usize,
//Slot has a MatrixBuffer, to be broadcast
slot: AtomicPtr<T>,
//Slot_reads represents the number of times slot has been read.
//If slot_reads == n_threads, then it is ready to be written to.
//If slot_reads < n_threads, it is ready to be read.
//Each thread is only allowed to read from the slot one time.
//It is incremented every time slot is read,
//And it is an integer modulo n_threads
slot_reads: AtomicUsize,
//barrier: Barrier,
//Stuff for barriers
barrier_sense: AtomicBool,
barrier_threads_arrived: AtomicUsize,
    //I guess sub_comms needs to have interior mutability?
//sub_comms: Vec<AtomicPtr<Arc<ThreadComm<T>>>>,
sub_comms: Vec<RwLock<Option<Arc<ThreadComm<T>>>>>,
}
impl<T> ThreadComm<T> {
pub fn new(n_threads: usize) -> ThreadComm<T> {
let init_ptr: *const T = ptr::null();
let mut sub_comms = Vec::with_capacity(n_threads);
for _ in 0..n_threads {
sub_comms.push(RwLock::new(Option::None));
}
ThreadComm{ n_threads: n_threads,
slot: AtomicPtr::new(init_ptr as *mut T),
slot_reads: AtomicUsize::new(n_threads),
// barrier: Barrier::new(n_threads),
barrier_sense: AtomicBool::new(false),
barrier_threads_arrived: AtomicUsize::new(0),
sub_comms: sub_comms,
}
}
fn barrier(&self, _thread_id: usize) {
if self.n_threads == 1 {
return;
}
let my_sense = self.barrier_sense.load(Ordering::Relaxed);
let my_threads_arrived = self.barrier_threads_arrived.fetch_add(1,Ordering::Relaxed);
if my_threads_arrived == self.n_threads-1 {
self.barrier_threads_arrived.store(0,Ordering::Relaxed);
self.barrier_sense.fetch_xor(true, Ordering::Relaxed);
} else {
while self.barrier_sense.load(Ordering::Relaxed) == my_sense { }
}
//self.barrier.wait();
}
fn
|
(&self, info: &ThreadInfo<T>, to_send: *mut T) -> *mut T {
if info.thread_id == 0 {
//Spin while waiting for the thread communicator to be ready to broadcast
            while self.slot_reads.load(Ordering::Relaxed) != self.n_threads {
}
self.slot.store(to_send, Ordering::Relaxed);
self.slot_reads.store(0, Ordering::Relaxed);
}
//Spin while waiting for the thread communicator chief to broadcast
while self.slot_reads.load(Ordering::Relaxed) == self.n_threads {
}
self.slot_reads.fetch_add(1, Ordering::Relaxed);
self.slot.load(Ordering::Relaxed)
/*
self.barrier(info.thread_id);
if info.thread_id == 0 {
self.slot.store(to_send, Ordering::Relaxed);
}
self.barrier(info.thread_id);
self.slot.load(Ordering::Relaxed)*/
}
//Pretty sure with this implementation, split can only be called one time.
fn split(&self, thread_id: usize, n_way: usize) -> Arc<ThreadComm<T>> {
assert_eq!(self.n_threads % n_way, 0);
let subcomm_n_threads = self.n_threads / n_way;
let sub_comm_number = thread_id / subcomm_n_threads; // Which subcomm are we going to use?
let sub_comm_id = thread_id % subcomm_n_threads; // What is our id within the subcomm?
self.barrier(thread_id);
if sub_comm_id == 0 {
let mut sub_comm = self.sub_comms[sub_comm_number].write().unwrap();
*sub_comm = Option::Some(Arc::new(ThreadComm::new(subcomm_n_threads)));
}
self.barrier(thread_id);
let comm = self.sub_comms[sub_comm_number].read().unwrap().clone();
comm.unwrap()
}
}
//unsafe impl Sync for ThreadComm {}
//unsafe impl Send for ThreadComm {}
pub struct ThreadInfo<T> {
thread_id: usize,
comm: Arc<ThreadComm<T>>,
}
impl<T> ThreadInfo<T> {
pub fn new( id: usize, comm: Arc<ThreadComm<T>> ) -> ThreadInfo<T> {
ThreadInfo{ thread_id: id, comm: comm }
}
    pub fn single_thread() -> ThreadInfo<T> {
ThreadInfo{ thread_id : 0, comm : Arc::new(ThreadComm::new(1)) }
}
pub fn barrier(&self) {
self.comm.barrier(self.thread_id);
}
pub fn broadcast(&self, to_send: *mut T) -> *mut T {
self.comm.broadcast(self, to_send)
}
pub fn num_threads(&self) -> usize { self.comm.n_threads }
pub fn thread_id(&self) -> usize { self.thread_id }
pub fn split(&self, n_way: usize) -> ThreadInfo<T> {
let subcomm = self.comm.split(self.thread_id, n_way);
let subcomm_id = self.thread_id % (self.comm.n_threads / n_way);
ThreadInfo{ thread_id: subcomm_id, comm: subcomm }
}
}
|
broadcast
|
identifier_name
|
thread_comm.rs
|
extern crate alloc;
use core::ptr::{self};
use std::sync::{Arc,RwLock};
//use std::sync::{Barrier};
use std::sync::atomic::{AtomicPtr,AtomicUsize,AtomicBool,Ordering};
pub struct ThreadComm<T> {
n_threads: usize,
//Slot has a MatrixBuffer, to be broadcast
slot: AtomicPtr<T>,
//Slot_reads represents the number of times slot has been read.
|
//If slot_reads < n_threads, it is ready to be read.
//Each thread is only allowed to read from the slot one time.
//It is incremented every time slot is read,
//And it is an integer modulo n_threads
slot_reads: AtomicUsize,
//barrier: Barrier,
//Stuff for barriers
barrier_sense: AtomicBool,
barrier_threads_arrived: AtomicUsize,
    //I guess sub_comms needs to have interior mutability?
//sub_comms: Vec<AtomicPtr<Arc<ThreadComm<T>>>>,
sub_comms: Vec<RwLock<Option<Arc<ThreadComm<T>>>>>,
}
impl<T> ThreadComm<T> {
pub fn new(n_threads: usize) -> ThreadComm<T> {
let init_ptr: *const T = ptr::null();
let mut sub_comms = Vec::with_capacity(n_threads);
for _ in 0..n_threads {
sub_comms.push(RwLock::new(Option::None));
}
ThreadComm{ n_threads: n_threads,
slot: AtomicPtr::new(init_ptr as *mut T),
slot_reads: AtomicUsize::new(n_threads),
// barrier: Barrier::new(n_threads),
barrier_sense: AtomicBool::new(false),
barrier_threads_arrived: AtomicUsize::new(0),
sub_comms: sub_comms,
}
}
fn barrier(&self, _thread_id: usize) {
if self.n_threads == 1 {
return;
}
let my_sense = self.barrier_sense.load(Ordering::Relaxed);
let my_threads_arrived = self.barrier_threads_arrived.fetch_add(1,Ordering::Relaxed);
if my_threads_arrived == self.n_threads-1 {
self.barrier_threads_arrived.store(0,Ordering::Relaxed);
self.barrier_sense.fetch_xor(true, Ordering::Relaxed);
} else {
while self.barrier_sense.load(Ordering::Relaxed) == my_sense { }
}
//self.barrier.wait();
}
fn broadcast(&self, info: &ThreadInfo<T>, to_send: *mut T) -> *mut T {
if info.thread_id == 0 {
//Spin while waiting for the thread communicator to be ready to broadcast
            while self.slot_reads.load(Ordering::Relaxed) != self.n_threads {
}
self.slot.store(to_send, Ordering::Relaxed);
self.slot_reads.store(0, Ordering::Relaxed);
}
//Spin while waiting for the thread communicator chief to broadcast
while self.slot_reads.load(Ordering::Relaxed) == self.n_threads {
}
self.slot_reads.fetch_add(1, Ordering::Relaxed);
self.slot.load(Ordering::Relaxed)
/*
self.barrier(info.thread_id);
if info.thread_id == 0 {
self.slot.store(to_send, Ordering::Relaxed);
}
self.barrier(info.thread_id);
self.slot.load(Ordering::Relaxed)*/
}
//Pretty sure with this implementation, split can only be called one time.
fn split(&self, thread_id: usize, n_way: usize) -> Arc<ThreadComm<T>> {
assert_eq!(self.n_threads % n_way, 0);
let subcomm_n_threads = self.n_threads / n_way;
let sub_comm_number = thread_id / subcomm_n_threads; // Which subcomm are we going to use?
let sub_comm_id = thread_id % subcomm_n_threads; // What is our id within the subcomm?
self.barrier(thread_id);
if sub_comm_id == 0 {
let mut sub_comm = self.sub_comms[sub_comm_number].write().unwrap();
*sub_comm = Option::Some(Arc::new(ThreadComm::new(subcomm_n_threads)));
}
self.barrier(thread_id);
let comm = self.sub_comms[sub_comm_number].read().unwrap().clone();
comm.unwrap()
}
}
//unsafe impl Sync for ThreadComm {}
//unsafe impl Send for ThreadComm {}
pub struct ThreadInfo<T> {
thread_id: usize,
comm: Arc<ThreadComm<T>>,
}
impl<T> ThreadInfo<T> {
pub fn new( id: usize, comm: Arc<ThreadComm<T>> ) -> ThreadInfo<T> {
ThreadInfo{ thread_id: id, comm: comm }
}
    pub fn single_thread() -> ThreadInfo<T> {
ThreadInfo{ thread_id : 0, comm : Arc::new(ThreadComm::new(1)) }
}
pub fn barrier(&self) {
self.comm.barrier(self.thread_id);
}
pub fn broadcast(&self, to_send: *mut T) -> *mut T {
self.comm.broadcast(self, to_send)
}
pub fn num_threads(&self) -> usize { self.comm.n_threads }
pub fn thread_id(&self) -> usize { self.thread_id }
pub fn split(&self, n_way: usize) -> ThreadInfo<T> {
let subcomm = self.comm.split(self.thread_id, n_way);
let subcomm_id = self.thread_id % (self.comm.n_threads / n_way);
ThreadInfo{ thread_id: subcomm_id, comm: subcomm }
}
}
|
//If slot_reads == n_threads, then it is ready to be written to.
|
random_line_split
|
trait-inheritance-subst2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
trait Panda<T> {
fn chomp(&self, bamboo: &T) -> T;
}
trait Add<RHS,Result>: Panda<RHS> {
fn add(&self, rhs: &RHS) -> Result;
}
trait MyNum : Add<Self,Self> { }
struct MyInt { val: isize }
impl Panda<MyInt> for MyInt {
fn chomp(&self, bamboo: &MyInt) -> MyInt {
mi(self.val + bamboo.val)
}
}
impl Add<MyInt, MyInt> for MyInt {
fn add(&self, other: &MyInt) -> MyInt { self.chomp(other) }
}
impl MyNum for MyInt {}
fn f<T:MyNum>(x: T, y: T) -> T {
return x.add(&y).chomp(&y);
}
fn mi(v: isize) -> MyInt
|
pub fn main() {
let (x, y) = (mi(3), mi(5));
let z = f(x, y);
assert_eq!(z.val, 13);
}
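// Worked trace (sketch, not part of the test): f(mi(3), mi(5)) calls add,
// which delegates to chomp (3 + 5 = 8), then chomps y once more
// (8 + 5 = 13). The supertrait chain MyNum: Add<Self, Self>: Panda<Self>
// is what lets f reach both methods through the single T: MyNum bound.
fn _trace() {
    let (x, y) = (mi(3), mi(5));
    assert_eq!(x.add(&y).val, 8);
    assert_eq!(f(mi(3), mi(5)).val, 13);
}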
|
{ MyInt { val: v } }
|
identifier_body
|
trait-inheritance-subst2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
trait Panda<T> {
fn chomp(&self, bamboo: &T) -> T;
}
trait Add<RHS,Result>: Panda<RHS> {
fn add(&self, rhs: &RHS) -> Result;
}
trait MyNum : Add<Self,Self> { }
struct MyInt { val: isize }
impl Panda<MyInt> for MyInt {
fn chomp(&self, bamboo: &MyInt) -> MyInt {
mi(self.val + bamboo.val)
}
|
fn add(&self, other: &MyInt) -> MyInt { self.chomp(other) }
}
impl MyNum for MyInt {}
fn f<T:MyNum>(x: T, y: T) -> T {
return x.add(&y).chomp(&y);
}
fn mi(v: isize) -> MyInt { MyInt { val: v } }
pub fn main() {
let (x, y) = (mi(3), mi(5));
let z = f(x, y);
assert_eq!(z.val, 13);
}
|
}
impl Add<MyInt, MyInt> for MyInt {
|
random_line_split
|
trait-inheritance-subst2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
trait Panda<T> {
fn chomp(&self, bamboo: &T) -> T;
}
trait Add<RHS,Result>: Panda<RHS> {
fn add(&self, rhs: &RHS) -> Result;
}
trait MyNum : Add<Self,Self> { }
struct MyInt { val: isize }
impl Panda<MyInt> for MyInt {
fn chomp(&self, bamboo: &MyInt) -> MyInt {
mi(self.val + bamboo.val)
}
}
impl Add<MyInt, MyInt> for MyInt {
fn
|
(&self, other: &MyInt) -> MyInt { self.chomp(other) }
}
impl MyNum for MyInt {}
fn f<T:MyNum>(x: T, y: T) -> T {
return x.add(&y).chomp(&y);
}
fn mi(v: isize) -> MyInt { MyInt { val: v } }
pub fn main() {
let (x, y) = (mi(3), mi(5));
let z = f(x, y);
assert_eq!(z.val, 13);
}
|
add
|
identifier_name
|
http-server.rs
|
extern crate mioco;
extern crate env_logger;
extern crate httparse;
use std::net::SocketAddr;
use std::str::FromStr;
use std::io::{self, Write, Read};
use mioco::net::TcpListener;
const DEFAULT_LISTEN_ADDR: &'static str = "127.0.0.1:5555";
fn listend_addr() -> SocketAddr {
FromStr::from_str(DEFAULT_LISTEN_ADDR).unwrap()
}
const RESPONSE: &'static str = "HTTP/1.1 200 OK\r
Content-Length: 11\r
\r
Hello World";
const RESPONSE_404: &'static str = "HTTP/1.1 404 Not Found\r
Content-Length: 11\r
\r
Hello World";
fn main() {
env_logger::init();
let addr = listend_addr();
let listener = TcpListener::bind(&addr).unwrap();
println!("Starting mioco http server on {:?}",
listener.local_addr().unwrap());
mioco::spawn(move || {
let mut joins: Vec<_> = (0..mioco::thread_num())
.map(|_| {
let listener = listener.try_clone().unwrap();
mioco::spawn(move || -> io::Result<()> {
loop {
let (mut conn, _addr) = listener.accept()?;
mioco::spawn(move || -> io::Result<()> {
let mut buf_i = 0;
let mut buf = [0u8; 1024];
loop {
let mut headers = [httparse::EMPTY_HEADER; 16];
let len = conn.read(&mut buf[buf_i..])?;
if len == 0 {
return Ok(());
}
buf_i += len;
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buf[0..buf_i]).unwrap();
if res.is_complete()
|
}
});
}
})
})
.collect();
joins.drain(..).map(|join| join.join().unwrap()).count();
})
.join()
.unwrap();
}
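// Usage sketch (not in the original file): with the server running,
//
//   curl -i http://127.0.0.1:5555/
//
// should answer "HTTP/1.1 200 OK" with the fixed 11-byte "Hello World"
// body. The 404 branch is only taken when httparse reports no path, so
// well-formed requests get a 200 regardless of the URL they ask for.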
|
{
let req_len = res.unwrap();
match req.path {
Some(ref _path) => {
let _ = conn.write_all(&RESPONSE.as_bytes())?;
if req_len != buf_i {
// request has a body; TODO: handle it
}
buf_i = 0;
}
None => {
let _ = conn.write_all(&RESPONSE_404.as_bytes())?;
return Ok(());
}
}
}
|
conditional_block
|
http-server.rs
|
extern crate mioco;
extern crate env_logger;
extern crate httparse;
use std::net::SocketAddr;
use std::str::FromStr;
use std::io::{self, Write, Read};
use mioco::net::TcpListener;
const DEFAULT_LISTEN_ADDR: &'static str = "127.0.0.1:5555";
fn listend_addr() -> SocketAddr {
FromStr::from_str(DEFAULT_LISTEN_ADDR).unwrap()
}
const RESPONSE: &'static str = "HTTP/1.1 200 OK\r
Content-Length: 11\r
\r
Hello World";
const RESPONSE_404: &'static str = "HTTP/1.1 404 Not Found\r
Content-Length: 11\r
\r
Hello World";
fn main()
|
loop {
let mut headers = [httparse::EMPTY_HEADER; 16];
let len = conn.read(&mut buf[buf_i..])?;
if len == 0 {
return Ok(());
}
buf_i += len;
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buf[0..buf_i]).unwrap();
if res.is_complete() {
let req_len = res.unwrap();
match req.path {
Some(ref _path) => {
let _ = conn.write_all(&RESPONSE.as_bytes())?;
                                    if req_len != buf_i {
// request has a body; TODO: handle it
}
buf_i = 0;
}
None => {
let _ = conn.write_all(&RESPONSE_404.as_bytes())?;
return Ok(());
}
}
}
}
});
}
})
})
.collect();
joins.drain(..).map(|join| join.join().unwrap()).count();
})
.join()
.unwrap();
}
|
{
env_logger::init();
let addr = listend_addr();
let listener = TcpListener::bind(&addr).unwrap();
println!("Starting mioco http server on {:?}",
listener.local_addr().unwrap());
mioco::spawn(move || {
let mut joins: Vec<_> = (0..mioco::thread_num())
.map(|_| {
let listener = listener.try_clone().unwrap();
mioco::spawn(move || -> io::Result<()> {
loop {
let (mut conn, _addr) = listener.accept()?;
mioco::spawn(move || -> io::Result<()> {
let mut buf_i = 0;
let mut buf = [0u8; 1024];
|
identifier_body
|
http-server.rs
|
extern crate mioco;
extern crate env_logger;
extern crate httparse;
use std::net::SocketAddr;
use std::str::FromStr;
use std::io::{self, Write, Read};
use mioco::net::TcpListener;
const DEFAULT_LISTEN_ADDR: &'static str = "127.0.0.1:5555";
fn listend_addr() -> SocketAddr {
FromStr::from_str(DEFAULT_LISTEN_ADDR).unwrap()
}
const RESPONSE: &'static str = "HTTP/1.1 200 OK\r
Content-Length: 11\r
\r
Hello World";
const RESPONSE_404: &'static str = "HTTP/1.1 404 Not Found\r
Content-Length: 11\r
\r
Hello World";
fn
|
() {
env_logger::init();
let addr = listend_addr();
let listener = TcpListener::bind(&addr).unwrap();
println!("Starting mioco http server on {:?}",
listener.local_addr().unwrap());
mioco::spawn(move || {
let mut joins: Vec<_> = (0..mioco::thread_num())
.map(|_| {
let listener = listener.try_clone().unwrap();
mioco::spawn(move || -> io::Result<()> {
loop {
let (mut conn, _addr) = listener.accept()?;
mioco::spawn(move || -> io::Result<()> {
let mut buf_i = 0;
let mut buf = [0u8; 1024];
loop {
let mut headers = [httparse::EMPTY_HEADER; 16];
let len = conn.read(&mut buf[buf_i..])?;
if len == 0 {
return Ok(());
}
buf_i += len;
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buf[0..buf_i]).unwrap();
if res.is_complete() {
let req_len = res.unwrap();
match req.path {
Some(ref _path) => {
let _ = conn.write_all(&RESPONSE.as_bytes())?;
                                    if req_len != buf_i {
// request has a body; TODO: handle it
}
buf_i = 0;
}
None => {
let _ = conn.write_all(&RESPONSE_404.as_bytes())?;
return Ok(());
}
}
}
}
});
}
})
})
.collect();
joins.drain(..).map(|join| join.join().unwrap()).count();
})
.join()
.unwrap();
}
|
main
|
identifier_name
|
http-server.rs
|
extern crate mioco;
extern crate env_logger;
extern crate httparse;
use std::net::SocketAddr;
use std::str::FromStr;
use std::io::{self, Write, Read};
use mioco::net::TcpListener;
const DEFAULT_LISTEN_ADDR: &'static str = "127.0.0.1:5555";
fn listend_addr() -> SocketAddr {
FromStr::from_str(DEFAULT_LISTEN_ADDR).unwrap()
}
const RESPONSE: &'static str = "HTTP/1.1 200 OK\r
Content-Length: 11\r
\r
Hello World";
const RESPONSE_404: &'static str = "HTTP/1.1 404 Not Found\r
Content-Length: 11\r
\r
Hello World";
fn main() {
env_logger::init();
let addr = listend_addr();
let listener = TcpListener::bind(&addr).unwrap();
println!("Starting mioco http server on {:?}",
listener.local_addr().unwrap());
mioco::spawn(move || {
let mut joins: Vec<_> = (0..mioco::thread_num())
.map(|_| {
let listener = listener.try_clone().unwrap();
mioco::spawn(move || -> io::Result<()> {
loop {
let (mut conn, _addr) = listener.accept()?;
mioco::spawn(move || -> io::Result<()> {
let mut buf_i = 0;
let mut buf = [0u8; 1024];
loop {
let mut headers = [httparse::EMPTY_HEADER; 16];
let len = conn.read(&mut buf[buf_i..])?;
if len == 0 {
return Ok(());
}
buf_i += len;
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buf[0..buf_i]).unwrap();
if res.is_complete() {
let req_len = res.unwrap();
match req.path {
Some(ref _path) => {
let _ = conn.write_all(&RESPONSE.as_bytes())?;
                                    if req_len != buf_i {
// request has a body; TODO: handle it
}
buf_i = 0;
}
None => {
let _ = conn.write_all(&RESPONSE_404.as_bytes())?;
return Ok(());
}
}
|
}
});
}
})
})
.collect();
joins.drain(..).map(|join| join.join().unwrap()).count();
})
.join()
.unwrap();
}
|
}
|
random_line_split
|
helpers.rs
|
use handlebars::*;
use serde_json::value::Value;
use serde_json::Map;
#[derive(PartialEq)]
enum Kind {
Object,
Array,
String,
Number,
Boolean,
Null,
}
struct IsKind {
kind: Kind,
}
impl HelperDef for IsKind {
fn call(&self, h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match if self.kind == Kind::Object && param.value().is_object() ||
self.kind == Kind::Array && param.value().is_array() ||
self.kind == Kind::String && param.value().is_string() ||
self.kind == Kind::Number && param.value().is_number() ||
self.kind == Kind::Boolean && param.value().is_boolean() ||
self.kind == Kind::Null && param.value().is_null()
|
else {
h.inverse()
} {
Some(ref t) => t.render(r, rc),
None => Ok(()),
}
}
}
fn include_helper(h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match param.value().as_str() {
Some(s) => {
match r.get_template(s) {
Some(t) => t.render(r, rc),
None => Err(RenderError::new("Template not found")),
}
}
None => Err(RenderError::new("String parameter expected")),
}
}
fn annotate_decorator(_: &Decorator,
_: &Handlebars,
rc: &mut RenderContext)
-> Result<(), RenderError> {
fn annotate_map(map: &mut Map<String, Value>) {
for (k, v) in map {
if let Some(ref mut m) = v.as_object_mut().as_mut() {
            annotate_map(m); // &mut &mut Map deref-coerces to &mut Map
m.insert("@name".to_string(), to_json(&k));
}
}
}
if let Some(ref mut m) = rc.context_mut().data_mut().as_object_mut().as_mut() {
annotate_map(m)
}
Ok(())
}
pub fn add_helpers(hb: &mut Handlebars) {
hb.register_helper("if_object", Box::new(IsKind { kind: Kind::Object }));
hb.register_helper("if_array", Box::new(IsKind { kind: Kind::Array }));
hb.register_helper("if_string", Box::new(IsKind { kind: Kind::String }));
hb.register_helper("if_number", Box::new(IsKind { kind: Kind::Number }));
hb.register_helper("if_boolean", Box::new(IsKind { kind: Kind::Boolean }));
hb.register_helper("if_null", Box::new(IsKind { kind: Kind::Null }));
hb.register_helper("include", Box::new(include_helper));
hb.register_decorator("annotate", Box::new(annotate_decorator));
}
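// Template-side sketch (handlebars syntax as of the 0.x-era API used
// above; the template bodies here are illustrative, not from the source):
//
//   {{#if_object user}} {{user.name}} {{else}} not an object {{/if_object}}
//   {{include "footer"}}
//
// and applying the `annotate` decorator ({{*annotate}}) injects an
// "@name" key into every nested object of the context, so a template can
// recover the key each sub-object was stored under.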
|
{
h.template()
}
|
conditional_block
|
helpers.rs
|
use handlebars::*;
use serde_json::value::Value;
use serde_json::Map;
#[derive(PartialEq)]
enum Kind {
Object,
Array,
String,
Number,
Boolean,
Null,
}
struct IsKind {
kind: Kind,
}
impl HelperDef for IsKind {
fn call(&self, h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match if self.kind == Kind::Object && param.value().is_object() ||
self.kind == Kind::Array && param.value().is_array() ||
self.kind == Kind::String && param.value().is_string() ||
self.kind == Kind::Number && param.value().is_number() ||
self.kind == Kind::Boolean && param.value().is_boolean() ||
self.kind == Kind::Null && param.value().is_null() {
h.template()
} else {
h.inverse()
} {
Some(ref t) => t.render(r, rc),
None => Ok(()),
}
}
}
fn include_helper(h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match param.value().as_str() {
Some(s) => {
match r.get_template(s) {
Some(t) => t.render(r, rc),
None => Err(RenderError::new("Template not found")),
}
}
None => Err(RenderError::new("String parameter expected")),
}
}
fn annotate_decorator(_: &Decorator,
_: &Handlebars,
rc: &mut RenderContext)
-> Result<(), RenderError> {
fn annotate_map(map: &mut Map<String, Value>) {
for (k, v) in map {
if let Some(ref mut m) = v.as_object_mut().as_mut() {
            annotate_map(m); // &mut &mut Map deref-coerces to &mut Map
m.insert("@name".to_string(), to_json(&k));
}
}
}
if let Some(ref mut m) = rc.context_mut().data_mut().as_object_mut().as_mut() {
annotate_map(m)
}
Ok(())
}
pub fn add_helpers(hb: &mut Handlebars) {
hb.register_helper("if_object", Box::new(IsKind { kind: Kind::Object }));
hb.register_helper("if_array", Box::new(IsKind { kind: Kind::Array }));
hb.register_helper("if_string", Box::new(IsKind { kind: Kind::String }));
hb.register_helper("if_number", Box::new(IsKind { kind: Kind::Number }));
hb.register_helper("if_boolean", Box::new(IsKind { kind: Kind::Boolean }));
|
hb.register_decorator("annotate", Box::new(annotate_decorator));
}
|
hb.register_helper("if_null", Box::new(IsKind { kind: Kind::Null }));
hb.register_helper("include", Box::new(include_helper));
|
random_line_split
|
helpers.rs
|
use handlebars::*;
use serde_json::value::Value;
use serde_json::Map;
#[derive(PartialEq)]
enum
|
{
Object,
Array,
String,
Number,
Boolean,
Null,
}
struct IsKind {
kind: Kind,
}
impl HelperDef for IsKind {
fn call(&self, h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match if self.kind == Kind::Object && param.value().is_object() ||
self.kind == Kind::Array && param.value().is_array() ||
self.kind == Kind::String && param.value().is_string() ||
self.kind == Kind::Number && param.value().is_number() ||
self.kind == Kind::Boolean && param.value().is_boolean() ||
self.kind == Kind::Null && param.value().is_null() {
h.template()
} else {
h.inverse()
} {
Some(ref t) => t.render(r, rc),
None => Ok(()),
}
}
}
fn include_helper(h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match param.value().as_str() {
Some(s) => {
match r.get_template(s) {
Some(t) => t.render(r, rc),
None => Err(RenderError::new("Template not found")),
}
}
None => Err(RenderError::new("String parameter expected")),
}
}
fn annotate_decorator(_: &Decorator,
_: &Handlebars,
rc: &mut RenderContext)
-> Result<(), RenderError> {
fn annotate_map(map: &mut Map<String, Value>) {
for (k, v) in map {
if let Some(ref mut m) = v.as_object_mut().as_mut() {
            annotate_map(m); // &mut &mut Map deref-coerces to &mut Map
m.insert("@name".to_string(), to_json(&k));
}
}
}
if let Some(ref mut m) = rc.context_mut().data_mut().as_object_mut().as_mut() {
annotate_map(m)
}
Ok(())
}
pub fn add_helpers(hb: &mut Handlebars) {
hb.register_helper("if_object", Box::new(IsKind { kind: Kind::Object }));
hb.register_helper("if_array", Box::new(IsKind { kind: Kind::Array }));
hb.register_helper("if_string", Box::new(IsKind { kind: Kind::String }));
hb.register_helper("if_number", Box::new(IsKind { kind: Kind::Number }));
hb.register_helper("if_boolean", Box::new(IsKind { kind: Kind::Boolean }));
hb.register_helper("if_null", Box::new(IsKind { kind: Kind::Null }));
hb.register_helper("include", Box::new(include_helper));
hb.register_decorator("annotate", Box::new(annotate_decorator));
}
|
Kind
|
identifier_name
|
helpers.rs
|
use handlebars::*;
use serde_json::value::Value;
use serde_json::Map;
#[derive(PartialEq)]
enum Kind {
Object,
Array,
String,
Number,
Boolean,
Null,
}
struct IsKind {
kind: Kind,
}
impl HelperDef for IsKind {
fn call(&self, h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> {
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match if self.kind == Kind::Object && param.value().is_object() ||
self.kind == Kind::Array && param.value().is_array() ||
self.kind == Kind::String && param.value().is_string() ||
self.kind == Kind::Number && param.value().is_number() ||
self.kind == Kind::Boolean && param.value().is_boolean() ||
self.kind == Kind::Null && param.value().is_null() {
h.template()
} else {
h.inverse()
} {
Some(ref t) => t.render(r, rc),
None => Ok(()),
}
}
}
fn include_helper(h: &Helper, r: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError>
|
fn annotate_decorator(_: &Decorator,
_: &Handlebars,
rc: &mut RenderContext)
-> Result<(), RenderError> {
fn annotate_map(map: &mut Map<String, Value>) {
for (k, v) in map {
if let Some(ref mut m) = v.as_object_mut().as_mut() {
            annotate_map(m); // &mut &mut Map deref-coerces to &mut Map
m.insert("@name".to_string(), to_json(&k));
}
}
}
if let Some(ref mut m) = rc.context_mut().data_mut().as_object_mut().as_mut() {
annotate_map(m)
}
Ok(())
}
pub fn add_helpers(hb: &mut Handlebars) {
hb.register_helper("if_object", Box::new(IsKind { kind: Kind::Object }));
hb.register_helper("if_array", Box::new(IsKind { kind: Kind::Array }));
hb.register_helper("if_string", Box::new(IsKind { kind: Kind::String }));
hb.register_helper("if_number", Box::new(IsKind { kind: Kind::Number }));
hb.register_helper("if_boolean", Box::new(IsKind { kind: Kind::Boolean }));
hb.register_helper("if_null", Box::new(IsKind { kind: Kind::Null }));
hb.register_helper("include", Box::new(include_helper));
hb.register_decorator("annotate", Box::new(annotate_decorator));
}
|
{
let param = h.param(0)
.ok_or(RenderError::new("Param expected for helper"))?;
match param.value().as_str() {
Some(s) => {
match r.get_template(s) {
Some(t) => t.render(r, rc),
None => Err(RenderError::new("Template not found")),
}
}
None => Err(RenderError::new("String parameter expected")),
}
}
|
identifier_body
|
gpdma0_ch2.rs
|
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - Source Address Register"]
pub sar: crate::Reg<sar::SAR_SPEC>,
_reserved1: [u8; 0x04],
#[doc = "0x08 - Destination Address Register"]
pub dar: crate::Reg<dar::DAR_SPEC>,
_reserved2: [u8; 0x0c],
#[doc = "0x18 - Control Register Low"]
pub ctll: crate::Reg<ctll::CTLL_SPEC>,
#[doc = "0x1c - Control Register High"]
pub ctlh: crate::Reg<ctlh::CTLH_SPEC>,
_reserved4: [u8; 0x20],
#[doc = "0x40 - Configuration Register Low"]
pub cfgl: crate::Reg<cfgl::CFGL_SPEC>,
#[doc = "0x44 - Configuration Register High"]
pub cfgh: crate::Reg<cfgh::CFGH_SPEC>,
}
#[doc = "SAR register accessor: an alias for `Reg<SAR_SPEC>`"]
pub type SAR = crate::Reg<sar::SAR_SPEC>;
#[doc = "Source Address Register"]
pub mod sar;
#[doc = "DAR register accessor: an alias for `Reg<DAR_SPEC>`"]
pub type DAR = crate::Reg<dar::DAR_SPEC>;
|
pub mod dar;
#[doc = "CTLL register accessor: an alias for `Reg<CTLL_SPEC>`"]
pub type CTLL = crate::Reg<ctll::CTLL_SPEC>;
#[doc = "Control Register Low"]
pub mod ctll;
#[doc = "CTLH register accessor: an alias for `Reg<CTLH_SPEC>`"]
pub type CTLH = crate::Reg<ctlh::CTLH_SPEC>;
#[doc = "Control Register High"]
pub mod ctlh;
#[doc = "CFGL register accessor: an alias for `Reg<CFGL_SPEC>`"]
pub type CFGL = crate::Reg<cfgl::CFGL_SPEC>;
#[doc = "Configuration Register Low"]
pub mod cfgl;
#[doc = "CFGH register accessor: an alias for `Reg<CFGH_SPEC>`"]
pub type CFGH = crate::Reg<cfgh::CFGH_SPEC>;
#[doc = "Configuration Register High"]
pub mod cfgh;
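// Access sketch (svd2rust conventions; the peripheral path `GPDMA0_CH2`
// below is an assumption taken from the module name, not verified against
// the generated crate):
//
//   let p = unsafe { pac::Peripherals::steal() };
//   p.GPDMA0_CH2.sar.write(|w| unsafe { w.bits(src_addr) });
//   p.GPDMA0_CH2.dar.write(|w| unsafe { w.bits(dst_addr) });
//
// i.e. each RegisterBlock field is driven through the usual closure-based
// `Reg::read`/`Reg::write`/`Reg::modify` API that svd2rust generates.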
|
#[doc = "Destination Address Register"]
|
random_line_split
|
gpdma0_ch2.rs
|
#[doc = r"Register block"]
#[repr(C)]
pub struct
|
{
#[doc = "0x00 - Source Address Register"]
pub sar: crate::Reg<sar::SAR_SPEC>,
_reserved1: [u8; 0x04],
#[doc = "0x08 - Destination Address Register"]
pub dar: crate::Reg<dar::DAR_SPEC>,
_reserved2: [u8; 0x0c],
#[doc = "0x18 - Control Register Low"]
pub ctll: crate::Reg<ctll::CTLL_SPEC>,
#[doc = "0x1c - Control Register High"]
pub ctlh: crate::Reg<ctlh::CTLH_SPEC>,
_reserved4: [u8; 0x20],
#[doc = "0x40 - Configuration Register Low"]
pub cfgl: crate::Reg<cfgl::CFGL_SPEC>,
#[doc = "0x44 - Configuration Register High"]
pub cfgh: crate::Reg<cfgh::CFGH_SPEC>,
}
#[doc = "SAR register accessor: an alias for `Reg<SAR_SPEC>`"]
pub type SAR = crate::Reg<sar::SAR_SPEC>;
#[doc = "Source Address Register"]
pub mod sar;
#[doc = "DAR register accessor: an alias for `Reg<DAR_SPEC>`"]
pub type DAR = crate::Reg<dar::DAR_SPEC>;
#[doc = "Destination Address Register"]
pub mod dar;
#[doc = "CTLL register accessor: an alias for `Reg<CTLL_SPEC>`"]
pub type CTLL = crate::Reg<ctll::CTLL_SPEC>;
#[doc = "Control Register Low"]
pub mod ctll;
#[doc = "CTLH register accessor: an alias for `Reg<CTLH_SPEC>`"]
pub type CTLH = crate::Reg<ctlh::CTLH_SPEC>;
#[doc = "Control Register High"]
pub mod ctlh;
#[doc = "CFGL register accessor: an alias for `Reg<CFGL_SPEC>`"]
pub type CFGL = crate::Reg<cfgl::CFGL_SPEC>;
#[doc = "Configuration Register Low"]
pub mod cfgl;
#[doc = "CFGH register accessor: an alias for `Reg<CFGH_SPEC>`"]
pub type CFGH = crate::Reg<cfgh::CFGH_SPEC>;
#[doc = "Configuration Register High"]
pub mod cfgh;
|
RegisterBlock
|
identifier_name
|
enums.rs
|
use parking_lot::RwLock;
use std::collections::hash_map::HashMap;
use std::convert::TryInto;
use std::ops::Index;
use std::sync::Arc;
use dora_parser::ast;
use dora_parser::interner::Name;
use dora_parser::lexer::position::Position;
use crate::language::ty::{SourceType, SourceTypeArray};
use crate::utils::GrowableVec;
use crate::vm::{
extension_matches, impl_matches, namespace_path, Candidate, ClassInstanceId, ExtensionId,
FileId, ImplId, NamespaceId, TypeParam, TypeParamDefinition, TypeParamId, VM,
};
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumDefinitionId(u32);
impl EnumDefinitionId {
pub fn to_usize(self) -> usize {
self.0 as usize
}
}
impl From<usize> for EnumDefinitionId {
fn from(data: usize) -> EnumDefinitionId {
EnumDefinitionId(data.try_into().unwrap())
}
}
impl Index<EnumDefinitionId> for Vec<RwLock<EnumDefinition>> {
type Output = RwLock<EnumDefinition>;
fn index(&self, index: EnumDefinitionId) -> &RwLock<EnumDefinition> {
&self[index.0 as usize]
}
}
#[derive(Debug)]
pub struct EnumDefinition {
pub id: EnumDefinitionId,
pub file_id: FileId,
pub namespace_id: NamespaceId,
pub ast: Arc<ast::Enum>,
pub pos: Position,
pub name: Name,
pub is_pub: bool,
pub type_params: Vec<TypeParam>,
pub type_params2: TypeParamDefinition,
pub variants: Vec<EnumVariant>,
pub name_to_value: HashMap<Name, u32>,
pub impls: Vec<ImplId>,
pub extensions: Vec<ExtensionId>,
pub specializations: RwLock<HashMap<SourceTypeArray, EnumInstanceId>>,
pub simple_enumeration: bool,
}
impl EnumDefinition {
pub fn type_param(&self, id: TypeParamId) -> &TypeParam {
&self.type_params[id.to_usize()]
}
pub fn name(&self, vm: &VM) -> String {
namespace_path(vm, self.namespace_id, self.name)
}
pub fn name_with_params(&self, vm: &VM, type_list: &SourceTypeArray) -> String {
let name = vm.interner.str(self.name);
if type_list.len() > 0 {
let type_list = type_list
.iter()
.map(|p| p.name_enum(vm, self))
.collect::<Vec<_>>()
.join(", ");
format!("{}[{}]", name, type_list)
} else {
name.to_string()
}
}
}
#[derive(Debug)]
pub struct EnumVariant {
pub id: usize,
pub name: Name,
pub types: Vec<SourceType>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumInstanceId(u32);
impl From<usize> for EnumInstanceId {
fn from(data: usize) -> EnumInstanceId {
EnumInstanceId(data as u32)
}
}
impl GrowableVec<EnumInstance> {
pub fn idx(&self, index: EnumInstanceId) -> Arc<EnumInstance> {
self.idx_usize(index.0 as usize)
}
}
#[derive(Debug)]
pub struct EnumInstance {
pub id: EnumInstanceId,
pub enum_id: EnumDefinitionId,
pub type_params: SourceTypeArray,
pub layout: EnumLayout,
pub variants: RwLock<Vec<Option<ClassInstanceId>>>,
}
impl EnumInstance {
pub fn field_id(&self, xenum: &EnumDefinition, variant_id: usize, element: u32) -> u32 {
let variant = &xenum.variants[variant_id];
let mut units = 0;
for ty in &variant.types[0..element as usize] {
if ty.is_unit() {
units += 1;
}
}
1 + element - units
}
}
#[derive(Copy, Clone, Debug)]
pub enum EnumLayout {
Int,
Ptr,
Tagged,
}
#[derive(Debug)]
pub struct EnumDefVariant {
pub types: Vec<SourceType>,
}
pub fn find_methods_in_enum(
vm: &VM,
object_type: SourceType,
type_param_defs: &[TypeParam],
type_param_defs2: Option<&TypeParamDefinition>,
name: Name,
is_static: bool,
) -> Vec<Candidate> {
let enum_id = object_type.enum_id().unwrap();
let xenum = vm.enums[enum_id].read();
for &extension_id in &xenum.extensions {
if let Some(bindings) = extension_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
extension_id,
) {
let extension = vm.extensions[extension_id].read();
let table = if is_static {
&extension.static_names
} else {
&extension.instance_names
};
if let Some(&fct_id) = table.get(&name) {
return vec![Candidate {
object_type: object_type.clone(),
container_type_params: bindings,
fct_id,
}];
}
}
}
let mut candidates = Vec::new();
for &impl_id in &xenum.impls {
if let Some(bindings) = impl_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
impl_id,
) {
let ximpl = vm.impls[impl_id].read();
let table = if is_static {
|
};
if let Some(&method_id) = table.get(&name) {
candidates.push(Candidate {
object_type: object_type.clone(),
container_type_params: bindings.clone(),
fct_id: method_id,
});
}
}
}
candidates
}
|
&ximpl.static_names
} else {
&ximpl.instance_names
|
random_line_split
|
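The index arithmetic in `EnumInstance::field_id` above is easy to check in isolation: slot 0 of the lowered class is presumably the tag (hence the leading `1 +`), and unit-typed payload elements occupy no field, so each unit element before `element` shifts the index down by one. A standalone sketch of that rule:

// Standalone sketch of the arithmetic in `EnumInstance::field_id`:
// field index = 1 + element - (unit-typed elements before it).
fn field_id(is_unit: &[bool], element: u32) -> u32 {
    let units = is_unit[..element as usize]
        .iter()
        .filter(|&&u| u)
        .count() as u32;
    1 + element - units
}

fn main() {
    // Variant payload shape: (Int, Unit, Int, Unit, Int)
    let tys = [false, true, false, true, false];
    assert_eq!(field_id(&tys, 0), 1); // first payload lands right after the tag
    assert_eq!(field_id(&tys, 2), 2); // one unit skipped before it
    assert_eq!(field_id(&tys, 4), 3); // two units skipped
    println!("field ids follow the 1 + element - units rule");
}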
enums.rs
|
use parking_lot::RwLock;
use std::collections::hash_map::HashMap;
use std::convert::TryInto;
use std::ops::Index;
use std::sync::Arc;
use dora_parser::ast;
use dora_parser::interner::Name;
use dora_parser::lexer::position::Position;
use crate::language::ty::{SourceType, SourceTypeArray};
use crate::utils::GrowableVec;
use crate::vm::{
extension_matches, impl_matches, namespace_path, Candidate, ClassInstanceId, ExtensionId,
FileId, ImplId, NamespaceId, TypeParam, TypeParamDefinition, TypeParamId, VM,
};
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumDefinitionId(u32);
impl EnumDefinitionId {
pub fn to_usize(self) -> usize {
self.0 as usize
}
}
impl From<usize> for EnumDefinitionId {
fn from(data: usize) -> EnumDefinitionId {
EnumDefinitionId(data.try_into().unwrap())
}
}
impl Index<EnumDefinitionId> for Vec<RwLock<EnumDefinition>> {
type Output = RwLock<EnumDefinition>;
fn index(&self, index: EnumDefinitionId) -> &RwLock<EnumDefinition> {
&self[index.0 as usize]
}
}
#[derive(Debug)]
pub struct EnumDefinition {
pub id: EnumDefinitionId,
pub file_id: FileId,
pub namespace_id: NamespaceId,
pub ast: Arc<ast::Enum>,
pub pos: Position,
pub name: Name,
pub is_pub: bool,
pub type_params: Vec<TypeParam>,
pub type_params2: TypeParamDefinition,
pub variants: Vec<EnumVariant>,
pub name_to_value: HashMap<Name, u32>,
pub impls: Vec<ImplId>,
pub extensions: Vec<ExtensionId>,
pub specializations: RwLock<HashMap<SourceTypeArray, EnumInstanceId>>,
pub simple_enumeration: bool,
}
impl EnumDefinition {
pub fn type_param(&self, id: TypeParamId) -> &TypeParam {
&self.type_params[id.to_usize()]
}
pub fn name(&self, vm: &VM) -> String {
namespace_path(vm, self.namespace_id, self.name)
}
pub fn name_with_params(&self, vm: &VM, type_list: &SourceTypeArray) -> String {
let name = vm.interner.str(self.name);
if type_list.len() > 0 {
let type_list = type_list
.iter()
.map(|p| p.name_enum(vm, self))
.collect::<Vec<_>>()
.join(", ");
format!("{}[{}]", name, type_list)
} else {
name.to_string()
}
}
}
#[derive(Debug)]
pub struct EnumVariant {
pub id: usize,
pub name: Name,
pub types: Vec<SourceType>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumInstanceId(u32);
impl From<usize> for EnumInstanceId {
fn from(data: usize) -> EnumInstanceId {
EnumInstanceId(data as u32)
}
}
impl GrowableVec<EnumInstance> {
pub fn idx(&self, index: EnumInstanceId) -> Arc<EnumInstance> {
self.idx_usize(index.0 as usize)
}
}
#[derive(Debug)]
pub struct EnumInstance {
pub id: EnumInstanceId,
pub enum_id: EnumDefinitionId,
pub type_params: SourceTypeArray,
pub layout: EnumLayout,
pub variants: RwLock<Vec<Option<ClassInstanceId>>>,
}
impl EnumInstance {
pub fn field_id(&self, xenum: &EnumDefinition, variant_id: usize, element: u32) -> u32 {
let variant = &xenum.variants[variant_id];
let mut units = 0;
for ty in &variant.types[0..element as usize] {
if ty.is_unit() {
units += 1;
}
}
1 + element - units
}
}
#[derive(Copy, Clone, Debug)]
pub enum EnumLayout {
Int,
Ptr,
Tagged,
}
#[derive(Debug)]
pub struct EnumDefVariant {
pub types: Vec<SourceType>,
}
pub fn find_methods_in_enum(
vm: &VM,
object_type: SourceType,
type_param_defs: &[TypeParam],
type_param_defs2: Option<&TypeParamDefinition>,
name: Name,
is_static: bool,
) -> Vec<Candidate> {
let enum_id = object_type.enum_id().unwrap();
let xenum = vm.enums[enum_id].read();
for &extension_id in &xenum.extensions {
if let Some(bindings) = extension_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
extension_id,
) {
let extension = vm.extensions[extension_id].read();
let table = if is_static {
&extension.static_names
} else {
&extension.instance_names
};
if let Some(&fct_id) = table.get(&name) {
return vec![Candidate {
object_type: object_type.clone(),
container_type_params: bindings,
fct_id,
}];
}
}
}
let mut candidates = Vec::new();
for &impl_id in &xenum.impls {
if let Some(bindings) = impl_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
impl_id,
) {
let ximpl = vm.impls[impl_id].read();
let table = if is_static {
&ximpl.static_names
} else {
&ximpl.instance_names
};
if let Some(&method_id) = table.get(&name)
|
}
}
candidates
}
|
{
candidates.push(Candidate {
object_type: object_type.clone(),
container_type_params: bindings.clone(),
fct_id: method_id,
});
}
|
conditional_block
|
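The control flow in `find_methods_in_enum` gives extensions precedence over impls: the first matching extension method returns immediately as the sole candidate, while impl methods are only accumulated for the caller to disambiguate. A small self-contained sketch of that precedence pattern (all names here are invented for illustration):

// Hedged sketch of the lookup precedence in `find_methods_in_enum`:
// extensions short-circuit with a single winner; impls form a candidate set.
fn find_method(extensions: &[(&str, u32)], impls: &[(&str, u32)], name: &str) -> Vec<u32> {
    // Extensions win outright: first match returns alone.
    for &(n, id) in extensions {
        if n == name {
            return vec![id];
        }
    }
    // Trait impls only contribute candidates.
    impls.iter().filter(|&&(n, _)| n == name).map(|&(_, id)| id).collect()
}

fn main() {
    let exts = [("first", 10)];
    let imps = [("first", 20), ("second", 21), ("second", 22)];
    assert_eq!(find_method(&exts, &imps, "first"), vec![10]);      // extension shadows impl
    assert_eq!(find_method(&exts, &imps, "second"), vec![21, 22]); // ambiguous impls surface
    println!("extension methods take precedence over impl methods");
}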
enums.rs
|
use parking_lot::RwLock;
use std::collections::hash_map::HashMap;
use std::convert::TryInto;
use std::ops::Index;
use std::sync::Arc;
use dora_parser::ast;
use dora_parser::interner::Name;
use dora_parser::lexer::position::Position;
use crate::language::ty::{SourceType, SourceTypeArray};
use crate::utils::GrowableVec;
use crate::vm::{
extension_matches, impl_matches, namespace_path, Candidate, ClassInstanceId, ExtensionId,
FileId, ImplId, NamespaceId, TypeParam, TypeParamDefinition, TypeParamId, VM,
};
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumDefinitionId(u32);
impl EnumDefinitionId {
pub fn to_usize(self) -> usize {
self.0 as usize
}
}
impl From<usize> for EnumDefinitionId {
fn from(data: usize) -> EnumDefinitionId {
EnumDefinitionId(data.try_into().unwrap())
}
}
impl Index<EnumDefinitionId> for Vec<RwLock<EnumDefinition>> {
type Output = RwLock<EnumDefinition>;
fn index(&self, index: EnumDefinitionId) -> &RwLock<EnumDefinition> {
&self[index.0 as usize]
}
}
#[derive(Debug)]
pub struct EnumDefinition {
pub id: EnumDefinitionId,
pub file_id: FileId,
pub namespace_id: NamespaceId,
pub ast: Arc<ast::Enum>,
pub pos: Position,
pub name: Name,
pub is_pub: bool,
pub type_params: Vec<TypeParam>,
pub type_params2: TypeParamDefinition,
pub variants: Vec<EnumVariant>,
pub name_to_value: HashMap<Name, u32>,
pub impls: Vec<ImplId>,
pub extensions: Vec<ExtensionId>,
pub specializations: RwLock<HashMap<SourceTypeArray, EnumInstanceId>>,
pub simple_enumeration: bool,
}
impl EnumDefinition {
pub fn type_param(&self, id: TypeParamId) -> &TypeParam {
&self.type_params[id.to_usize()]
}
pub fn name(&self, vm: &VM) -> String {
namespace_path(vm, self.namespace_id, self.name)
}
pub fn name_with_params(&self, vm: &VM, type_list: &SourceTypeArray) -> String {
let name = vm.interner.str(self.name);
if type_list.len() > 0 {
let type_list = type_list
.iter()
.map(|p| p.name_enum(vm, self))
.collect::<Vec<_>>()
.join(", ");
format!("{}[{}]", name, type_list)
} else {
name.to_string()
}
}
}
#[derive(Debug)]
pub struct EnumVariant {
pub id: usize,
pub name: Name,
pub types: Vec<SourceType>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EnumInstanceId(u32);
impl From<usize> for EnumInstanceId {
fn
|
(data: usize) -> EnumInstanceId {
EnumInstanceId(data as u32)
}
}
impl GrowableVec<EnumInstance> {
pub fn idx(&self, index: EnumInstanceId) -> Arc<EnumInstance> {
self.idx_usize(index.0 as usize)
}
}
#[derive(Debug)]
pub struct EnumInstance {
pub id: EnumInstanceId,
pub enum_id: EnumDefinitionId,
pub type_params: SourceTypeArray,
pub layout: EnumLayout,
pub variants: RwLock<Vec<Option<ClassInstanceId>>>,
}
impl EnumInstance {
pub fn field_id(&self, xenum: &EnumDefinition, variant_id: usize, element: u32) -> u32 {
let variant = &xenum.variants[variant_id];
let mut units = 0;
for ty in &variant.types[0..element as usize] {
if ty.is_unit() {
units += 1;
}
}
1 + element - units
}
}
#[derive(Copy, Clone, Debug)]
pub enum EnumLayout {
Int,
Ptr,
Tagged,
}
#[derive(Debug)]
pub struct EnumDefVariant {
pub types: Vec<SourceType>,
}
pub fn find_methods_in_enum(
vm: &VM,
object_type: SourceType,
type_param_defs: &[TypeParam],
type_param_defs2: Option<&TypeParamDefinition>,
name: Name,
is_static: bool,
) -> Vec<Candidate> {
let enum_id = object_type.enum_id().unwrap();
let xenum = vm.enums[enum_id].read();
for &extension_id in &xenum.extensions {
if let Some(bindings) = extension_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
extension_id,
) {
let extension = vm.extensions[extension_id].read();
let table = if is_static {
&extension.static_names
} else {
&extension.instance_names
};
if let Some(&fct_id) = table.get(&name) {
return vec![Candidate {
object_type: object_type.clone(),
container_type_params: bindings,
fct_id,
}];
}
}
}
let mut candidates = Vec::new();
for &impl_id in &xenum.impls {
if let Some(bindings) = impl_matches(
vm,
object_type.clone(),
type_param_defs,
type_param_defs2,
impl_id,
) {
let ximpl = vm.impls[impl_id].read();
let table = if is_static {
&ximpl.static_names
} else {
&ximpl.instance_names
};
if let Some(&method_id) = table.get(&name) {
candidates.push(Candidate {
object_type: object_type.clone(),
container_type_params: bindings.clone(),
fct_id: method_id,
});
}
}
}
candidates
}
|
from
|
identifier_name
|
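The three `EnumLayout` variants (`Int`, `Ptr`, `Tagged`) look like the usual enum lowering strategies: a bare integer for simple enumerations, a pointer whose niche encodes the discriminant for single-payload cases, and an explicit tag word otherwise. The Dora source above only lists the variants, so that mapping is an assumption here, but Rust's own layout rules make the ideas observable on the host:

// Illustration of the layout ideas plausibly behind `EnumLayout`
// (assumption: the names map onto the standard lowering strategies).
use std::mem::size_of;

enum Simple { A, B, C } // "Int": nothing but a discriminant

fn main() {
    // Simple enumerations fit in a single small integer.
    assert!(size_of::<Simple>() <= size_of::<i32>());
    // "Ptr": Option<Box<T>> reuses the null pointer as None,
    // so no separate tag word is needed.
    assert_eq!(size_of::<Option<Box<u64>>>(), size_of::<Box<u64>>());
    // "Tagged": a payload that may be any bit pattern forces an explicit tag.
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
    println!("Int / Ptr / Tagged layout ideas observable via size_of");
}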
lib.rs
|
use std::fs::File;
use std::path::Path;
use std::str::FromStr;
use std::io::BufReader;
use std::io::prelude::*;
use std::collections::HashMap;
use std::cmp::Ordering;
enum GameResult {
Win,
Loss,
Draw,
}
struct Game {
team1: String,
team2: String,
result: GameResult,
}
struct TeamData {
played: usize,
won: usize,
draw: usize,
lost: usize,
points: usize,
}
impl TeamData {
fn new() -> TeamData {
TeamData {
played: 0,
won: 0,
draw: 0,
lost: 0,
points: 0,
}
}
}
impl FromStr for Game {
type Err = &'static str;
fn from_str(input: &str) -> Result<Game, Self::Err> {
let input = input.trim(); // remove trailing & leading whitespace
let input: Vec<&str> = input.split(";").collect();
if input.len() != 3 {
return Err("Invalid line");
}
let outcome = match input[2] {
"win" => GameResult::Win,
"loss" => GameResult::Loss,
"draw" => GameResult::Draw,
_ => return Err("invalid outcome"),
};
Ok(Game {
team1: input[0].into(),
team2: input[1].into(),
result: outcome,
})
}
}
fn read_input(input: &Path) -> Vec<Game> {
let f = File::open(input).unwrap();
let file = BufReader::new(f);
let mut games = Vec::new();
for line in file.lines() {
if let Ok(game) = line.unwrap().parse::<Game>()
|
}
games
}
fn calc_data(games: &Vec<Game>) -> HashMap<String, TeamData> {
let mut teams: HashMap<String, TeamData> = HashMap::new();
for game in games {
let ((w1, l1, d1, p1), (w2, l2, d2, p2)) = match game.result {
GameResult::Win => ((1, 0, 0, 3), (0, 1, 0, 0)),
GameResult::Loss => ((0, 1, 0, 0), (1, 0, 0, 3)),
GameResult::Draw => ((0, 0, 1, 1), (0, 0, 1, 1)),
};
{
// new scope to satisfy the borrow checker
let team1 = teams.entry(game.team1.clone()).or_insert(TeamData::new());
team1.played += 1;
team1.won += w1;
team1.lost += l1;
team1.draw += d1;
team1.points += p1;
}
let team2 = teams.entry(game.team2.clone()).or_insert(TeamData::new());
team2.played += 1;
team2.won += w2;
team2.lost += l2;
team2.draw += d2;
team2.points += p2;
}
teams
}
fn write_output(content: &str, output: &Path) {
let mut buffer = File::create(output).unwrap();
buffer.write_all(content.as_bytes()).ok();
}
fn custom_sort(team1: &(&String, &TeamData), team2: &(&String, &TeamData)) -> Ordering {
match team1.1.points.cmp(&team2.1.points) {
Ordering::Equal => {
match team1.1.won.cmp(&team2.1.won) {
Ordering::Equal => team2.0.cmp(&team1.0),
ans @ _ => ans,
}
}
ans @ _ => ans,
}
}
fn pretty_print_results(results: &HashMap<String, TeamData>) -> String {
let mut sorted_list: Vec<_> = results.iter().collect();
sorted_list.sort_by(custom_sort);
sorted_list.reverse();
let mut results = String::new();
results.push_str("Team | MP | W | D | L | P\n");
for (ref team, ref data) in sorted_list {
let line = format!("{:<30} | {} | {} | {} | {} | {}\n",
team,
data.played,
data.won,
data.draw,
data.lost,
data.points);
results.push_str(&line);
}
results
}
pub fn tally(input: &Path, output: &Path) -> Option<usize> {
let games: Vec<Game> = read_input(input);
let results = calc_data(&games);
let content = pretty_print_results(&results);
write_output(&content, output);
Some(games.len())
}
|
{
games.push(game)
}
|
conditional_block
|
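`calc_data` encodes the standard 3/1/0 football scoring as a pair of (won, lost, draw, points) tuples per result, one for each side, keyed off team1's outcome. A standalone check of that table:

// Standalone check of the per-side bookkeeping tuples used in `calc_data`.
enum GameResult { Win, Loss, Draw }

fn deltas(r: &GameResult) -> ((u32, u32, u32, u32), (u32, u32, u32, u32)) {
    match r {
        GameResult::Win  => ((1, 0, 0, 3), (0, 1, 0, 0)),
        GameResult::Loss => ((0, 1, 0, 0), (1, 0, 0, 3)),
        GameResult::Draw => ((0, 0, 1, 1), (0, 0, 1, 1)),
    }
}

fn main() {
    // A win is worth 3 points to one side and nothing to the other...
    assert_eq!(deltas(&GameResult::Win), ((1, 0, 0, 3), (0, 1, 0, 0)));
    // ...and a draw is worth 1 point to both.
    let (a, b) = deltas(&GameResult::Draw);
    assert_eq!(a, b);
    assert_eq!(a.3, 1);
    println!("3/1/0 scoring table holds");
}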
lib.rs
|
use std::fs::File;
use std::path::Path;
use std::str::FromStr;
use std::io::BufReader;
use std::io::prelude::*;
use std::collections::HashMap;
use std::cmp::Ordering;
enum GameResult {
Win,
Loss,
Draw,
}
struct Game {
team1: String,
team2: String,
result: GameResult,
}
struct TeamData {
played: usize,
won: usize,
draw: usize,
lost: usize,
points: usize,
}
impl TeamData {
fn new() -> TeamData {
TeamData {
played: 0,
won: 0,
draw: 0,
lost: 0,
points: 0,
}
}
}
impl FromStr for Game {
type Err = &'static str;
fn from_str(input: &str) -> Result<Game, Self::Err> {
let input = input.trim(); // remove trailing & leading whitespace
let input: Vec<&str> = input.split(";").collect();
if input.len() != 3 {
return Err("Invalid line");
}
let outcome = match input[2] {
"win" => GameResult::Win,
"loss" => GameResult::Loss,
"draw" => GameResult::Draw,
_ => return Err("invalid outcome"),
};
Ok(Game {
team1: input[0].into(),
team2: input[1].into(),
result: outcome,
})
}
}
fn read_input(input: &Path) -> Vec<Game> {
let f = File::open(input).unwrap();
let file = BufReader::new(f);
let mut games = Vec::new();
for line in file.lines() {
if let Ok(game) = line.unwrap().parse::<Game>() {
games.push(game)
}
}
games
}
fn calc_data(games: &Vec<Game>) -> HashMap<String, TeamData> {
let mut teams: HashMap<String, TeamData> = HashMap::new();
for game in games {
let ((w1, l1, d1, p1), (w2, l2, d2, p2)) = match game.result {
GameResult::Win => ((1, 0, 0, 3), (0, 1, 0, 0)),
GameResult::Loss => ((0, 1, 0, 0), (1, 0, 0, 3)),
GameResult::Draw => ((0, 0, 1, 1), (0, 0, 1, 1)),
};
{
// new scope to satisfy the borrow checker
let team1 = teams.entry(game.team1.clone()).or_insert(TeamData::new());
team1.played += 1;
team1.won += w1;
team1.lost += l1;
team1.draw += d1;
team1.points += p1;
}
let team2 = teams.entry(game.team2.clone()).or_insert(TeamData::new());
team2.played += 1;
team2.won += w2;
team2.lost += l2;
team2.draw += d2;
team2.points += p2;
}
teams
}
fn write_output(content: &str, output: &Path) {
let mut buffer = File::create(output).unwrap();
buffer.write_all(content.as_bytes()).ok();
}
fn
|
(team1: &(&String, &TeamData), team2: &(&String, &TeamData)) -> Ordering {
match team1.1.points.cmp(&team2.1.points) {
Ordering::Equal => {
match team1.1.won.cmp(&team2.1.won) {
Ordering::Equal => team2.0.cmp(&team1.0),
ans @ _ => ans,
}
}
ans @ _ => ans,
}
}
fn pretty_print_results(results: &HashMap<String, TeamData>) -> String {
let mut sorted_list: Vec<_> = results.iter().collect();
sorted_list.sort_by(custom_sort);
sorted_list.reverse();
let mut results = String::new();
results.push_str("Team | MP | W | D | L | P\n");
for (ref team, ref data) in sorted_list {
let line = format!("{:<30} | {} | {} | {} | {} | {}\n",
team,
data.played,
data.won,
data.draw,
data.lost,
data.points);
results.push_str(&line);
}
results
}
pub fn tally(input: &Path, output: &Path) -> Option<usize> {
let games: Vec<Game> = read_input(input);
let results = calc_data(&games);
let content = pretty_print_results(&results);
write_output(&content, output);
Some(games.len())
}
|
custom_sort
|
identifier_name
|
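`custom_sort` orders ascending by points, then by wins, and compares team names in *reverse* so that the later `sorted_list.reverse()` produces a points-descending table that is alphabetically ascending within ties. A self-contained sketch of that comparator on (name, points, wins) rows:

// Standalone sketch of the tie-breaking in `custom_sort`: points, then
// wins, then reversed name order, so the final `.reverse()` yields a
// points-descending, alphabetically ascending table.
use std::cmp::Ordering;

fn cmp_rows(a: &(&str, u32, u32), b: &(&str, u32, u32)) -> Ordering {
    (a.1, a.2).cmp(&(b.1, b.2)).then_with(|| b.0.cmp(a.0))
}

fn main() {
    let mut rows = vec![("Bears", 6, 2), ("Apes", 6, 2), ("Cats", 9, 3)];
    rows.sort_by(cmp_rows);
    rows.reverse();
    let names: Vec<_> = rows.iter().map(|r| r.0).collect();
    assert_eq!(names, ["Cats", "Apes", "Bears"]); // points desc, then name asc
    println!("comparator matches custom_sort's intent");
}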
lib.rs
|
use std::fs::File;
use std::path::Path;
use std::str::FromStr;
use std::io::BufReader;
use std::io::prelude::*;
use std::collections::HashMap;
use std::cmp::Ordering;
enum GameResult {
Win,
Loss,
Draw,
}
struct Game {
team1: String,
team2: String,
result: GameResult,
}
struct TeamData {
played: usize,
won: usize,
draw: usize,
lost: usize,
points: usize,
}
impl TeamData {
fn new() -> TeamData {
TeamData {
played: 0,
won: 0,
draw: 0,
lost: 0,
points: 0,
}
}
}
impl FromStr for Game {
type Err = &'static str;
fn from_str(input: &str) -> Result<Game, Self::Err> {
let input = input.trim(); // remove trailing & leading whitespace
let input: Vec<&str> = input.split(";").collect();
if input.len() != 3 {
return Err("Invalid line");
}
let outcome = match input[2] {
"win" => GameResult::Win,
"loss" => GameResult::Loss,
"draw" => GameResult::Draw,
_ => return Err("invalid outcome"),
};
Ok(Game {
team1: input[0].into(),
team2: input[1].into(),
result: outcome,
})
}
}
fn read_input(input: &Path) -> Vec<Game> {
let f = File::open(input).unwrap();
let file = BufReader::new(f);
let mut games = Vec::new();
for line in file.lines() {
if let Ok(game) = line.unwrap().parse::<Game>() {
games.push(game)
}
}
games
}
fn calc_data(games: &Vec<Game>) -> HashMap<String, TeamData> {
let mut teams: HashMap<String, TeamData> = HashMap::new();
for game in games {
let ((w1, l1, d1, p1), (w2, l2, d2, p2)) = match game.result {
GameResult::Win => ((1, 0, 0, 3), (0, 1, 0, 0)),
GameResult::Loss => ((0, 1, 0, 0), (1, 0, 0, 3)),
GameResult::Draw => ((0, 0, 1, 1), (0, 0, 1, 1)),
};
{
// new scope to satisfy the borrow checker
let team1 = teams.entry(game.team1.clone()).or_insert(TeamData::new());
team1.played += 1;
team1.won += w1;
team1.lost += l1;
team1.draw += d1;
team1.points += p1;
}
let team2 = teams.entry(game.team2.clone()).or_insert(TeamData::new());
team2.played += 1;
team2.won += w2;
team2.lost += l2;
team2.draw += d2;
team2.points += p2;
}
teams
}
fn write_output(content: &str, output: &Path) {
let mut buffer = File::create(output).unwrap();
buffer.write_all(content.as_bytes()).ok();
}
fn custom_sort(team1: &(&String, &TeamData), team2: &(&String, &TeamData)) -> Ordering {
match team1.1.points.cmp(&team2.1.points) {
Ordering::Equal => {
match team1.1.won.cmp(&team2.1.won) {
Ordering::Equal => team2.0.cmp(&team1.0),
ans @ _ => ans,
}
}
ans @ _ => ans,
}
}
fn pretty_print_results(results: &HashMap<String, TeamData>) -> String {
let mut sorted_list: Vec<_> = results.iter().collect();
sorted_list.sort_by(custom_sort);
sorted_list.reverse();
let mut results = String::new();
results.push_str("Team | MP | W | D | L | P\n");
for (ref team, ref data) in sorted_list {
let line = format!("{:<30} | {} | {} | {} | {} | {}\n",
team,
data.played,
data.won,
data.draw,
data.lost,
data.points);
results.push_str(&line);
}
results
}
pub fn tally(input: &Path, output: &Path) -> Option<usize>
|
{
let games: Vec<Game> = read_input(input);
let results = calc_data(&games);
let content = pretty_print_results(&results);
write_output(&content, output);
Some(games.len())
}
|
identifier_body
|
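`pretty_print_results` relies on the `{:<30}` format spec to left-align team names into a fixed 30-character column. A quick standalone check of that behavior (the team name is just a sample string):

// Quick check of the `{:<30}` column formatting used in
// `pretty_print_results`: name left-aligned and padded to 30 chars.
fn main() {
    let line = format!("{:<30} | {} | {} | {} | {} | {}",
                       "Allegoric Alaskans", 3, 2, 0, 1, 6);
    assert!(line.starts_with("Allegoric Alaskans")); // name first, then padding
    assert_eq!(line.find('|'), Some(31));            // separator lands after col 30
    println!("{}", line);
}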
|
lib.rs
|
use std::fs::File;
use std::path::Path;
use std::str::FromStr;
use std::io::BufReader;
use std::io::prelude::*;
use std::collections::HashMap;
use std::cmp::Ordering;
enum GameResult {
Win,
Loss,
Draw,
}
struct Game {
team1: String,
team2: String,
result: GameResult,
}
struct TeamData {
played: usize,
won: usize,
draw: usize,
lost: usize,
points: usize,
}
impl TeamData {
fn new() -> TeamData {
TeamData {
played: 0,
won: 0,
draw: 0,
lost: 0,
points: 0,
}
}
}
impl FromStr for Game {
type Err = &'static str;
fn from_str(input: &str) -> Result<Game, Self::Err> {
let input = input.trim(); // remove trailing & leading whitespace
let input: Vec<&str> = input.split(";").collect();
if input.len() != 3 {
return Err("Invalid line");
}
let outcome = match input[2] {
|
"win" => GameResult::Win,
"loss" => GameResult::Loss,
"draw" => GameResult::Draw,
_ => return Err("invalid outcome"),
};
Ok(Game {
team1: input[0].into(),
team2: input[1].into(),
result: outcome,
})
}
}
fn read_input(input: &Path) -> Vec<Game> {
let f = File::open(input).unwrap();
let file = BufReader::new(f);
let mut games = Vec::new();
for line in file.lines() {
if let Ok(game) = line.unwrap().parse::<Game>() {
games.push(game)
}
}
games
}
fn calc_data(games: &Vec<Game>) -> HashMap<String, TeamData> {
let mut teams: HashMap<String, TeamData> = HashMap::new();
for game in games {
let ((w1, l1, d1, p1), (w2, l2, d2, p2)) = match game.result {
GameResult::Win => ((1, 0, 0, 3), (0, 1, 0, 0)),
GameResult::Loss => ((0, 1, 0, 0), (1, 0, 0, 3)),
GameResult::Draw => ((0, 0, 1, 1), (0, 0, 1, 1)),
};
{
// new scope to satisfy the borrow checker
let team1 = teams.entry(game.team1.clone()).or_insert(TeamData::new());
team1.played += 1;
team1.won += w1;
team1.lost += l1;
team1.draw += d1;
team1.points += p1;
}
let team2 = teams.entry(game.team2.clone()).or_insert(TeamData::new());
team2.played += 1;
team2.won += w2;
team2.lost += l2;
team2.draw += d2;
team2.points += p2;
}
teams
}
fn write_output(content: &str, output: &Path) {
let mut buffer = File::create(output).unwrap();
buffer.write_all(content.as_bytes()).ok();
}
fn custom_sort(team1: &(&String, &TeamData), team2: &(&String, &TeamData)) -> Ordering {
match team1.1.points.cmp(&team2.1.points) {
Ordering::Equal => {
match team1.1.won.cmp(&team2.1.won) {
Ordering::Equal => team2.0.cmp(&team1.0),
ans @ _ => ans,
}
}
ans @ _ => ans,
}
}
fn pretty_print_results(results: &HashMap<String, TeamData>) -> String {
let mut sorted_list: Vec<_> = results.iter().collect();
sorted_list.sort_by(custom_sort);
sorted_list.reverse();
let mut results = String::new();
results.push_str("Team | MP | W | D | L | P\n");
for (ref team, ref data) in sorted_list {
let line = format!("{:<30} | {} | {} | {} | {} | {}\n",
team,
data.played,
data.won,
data.draw,
data.lost,
data.points);
results.push_str(&line);
}
results
}
pub fn tally(input: &Path, output: &Path) -> Option<usize> {
let games: Vec<Game> = read_input(input);
let results = calc_data(&games);
let content = pretty_print_results(&results);
write_output(&content, output);
Some(games.len())
}
|
random_line_split
|