| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
router.rs | use std::collections::HashMap;
use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver};
use tokio::net::TcpStream;
use protobuf_codec::MessageStream;
use protocol;
pub enum RoutingMessage {
Connecting {
stream: MessageStream<TcpStream, protocol::Packet>,
},
}
pub struct RoutingTable {
routing_channels: HashMap<Vec<u8>, UnboundedSender<RoutingMessage>>,
}
impl RoutingTable {
pub fn new() -> Self {
RoutingTable { | let (tx, rx) = unbounded();
self.routing_channels.insert(token.to_vec(), tx);
return rx;
}
pub fn get(&mut self, token: &[u8]) -> Option<UnboundedSender<RoutingMessage>> {
self.routing_channels.get(token).cloned()
}
pub fn remove(&mut self, token: &[u8]) {
self.routing_channels.remove(token);
}
} | routing_channels: HashMap::new(),
}
}
pub fn register(&mut self, token: &[u8]) -> UnboundedReceiver<RoutingMessage> { | random_line_split |
router.rs | use std::collections::HashMap;
use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver};
use tokio::net::TcpStream;
use protobuf_codec::MessageStream;
use protocol;
pub enum RoutingMessage {
Connecting {
stream: MessageStream<TcpStream, protocol::Packet>,
},
}
pub struct RoutingTable {
routing_channels: HashMap<Vec<u8>, UnboundedSender<RoutingMessage>>,
}
impl RoutingTable {
pub fn new() -> Self {
RoutingTable {
routing_channels: HashMap::new(),
}
}
pub fn | (&mut self, token: &[u8]) -> UnboundedReceiver<RoutingMessage> {
let (tx, rx) = unbounded();
self.routing_channels.insert(token.to_vec(), tx);
return rx;
}
pub fn get(&mut self, token: &[u8]) -> Option<UnboundedSender<RoutingMessage>> {
self.routing_channels.get(token).cloned()
}
pub fn remove(&mut self, token: &[u8]) {
self.routing_channels.remove(token);
}
} | register | identifier_name |
switch.rs | use arch;
use core::sync::atomic::Ordering;
use context::{contexts, Context, Status, CONTEXT_ID};
/// Switch to the next context.
pub unsafe fn switch() -> bool {
use core::ops::DerefMut;
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupts::pause();
}
// get current CPU id
let cpu_id = ::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;
{
// get the list of context
let contexts = contexts();
// get the current context
{
let context_lock = contexts.current().expect("context::switch: not inside of context");
let mut context = context_lock.write();
from_ptr = context.deref_mut() as *mut Context;
}
let check_context = |context: &mut Context| -> bool {
// Set the CPU id on the context if none specified
if context.cpu_id == None && cpu_id == 0 {
context.cpu_id = Some(cpu_id);
}
// TODO unlock a context if there is new signals to be processed
// the process is on the current CPU, can be run but isn't running.
if context.cpu_id == Some(cpu_id) {
if context.status == Status::Runnable &&! context.running {
return true;
}
}
false
};
// find the next context to be executed
for (pid, context_lock) in contexts.iter() {
if *pid > (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) |
}
}
}
// whether there is no contexts to switch to, we remove the lock and return false
if to_ptr as usize == 0 {
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
// mark the prev context as stopped
(&mut *from_ptr).running = false;
// mark the next context as running
(&mut *to_ptr).running = true;
// store the current context ID
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// HACK: this is a temporary workaround, as arch is only used the the current CPU
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
// Switch to this new context
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
true
}
| {
to_ptr = context.deref_mut() as *mut Context;
} | conditional_block |
switch.rs | use arch;
use core::sync::atomic::Ordering;
use context::{contexts, Context, Status, CONTEXT_ID};
/// Switch to the next context.
pub unsafe fn | () -> bool {
use core::ops::DerefMut;
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupts::pause();
}
// get current CPU id
let cpu_id = ::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;
{
// get the list of context
let contexts = contexts();
// get the current context
{
let context_lock = contexts.current().expect("context::switch: not inside of context");
let mut context = context_lock.write();
from_ptr = context.deref_mut() as *mut Context;
}
let check_context = |context: &mut Context| -> bool {
// Set the CPU id on the context if none specified
if context.cpu_id == None && cpu_id == 0 {
context.cpu_id = Some(cpu_id);
}
// TODO unlock a context if there is new signals to be processed
// the process is on the current CPU, can be run but isn't running.
if context.cpu_id == Some(cpu_id) {
if context.status == Status::Runnable &&! context.running {
return true;
}
}
false
};
// find the next context to be executed
for (pid, context_lock) in contexts.iter() {
if *pid > (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) {
to_ptr = context.deref_mut() as *mut Context;
}
}
}
}
// whether there is no contexts to switch to, we remove the lock and return false
if to_ptr as usize == 0 {
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
// mark the prev context as stopped
(&mut *from_ptr).running = false;
// mark the next context as running
(&mut *to_ptr).running = true;
// store the current context ID
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// HACK: this is a temporary workaround, as arch is only used the the current CPU
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
// Switch to this new context
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
true
}
| switch | identifier_name |
switch.rs | use arch;
use core::sync::atomic::Ordering;
use context::{contexts, Context, Status, CONTEXT_ID};
/// Switch to the next context.
pub unsafe fn switch() -> bool | let mut context = context_lock.write();
from_ptr = context.deref_mut() as *mut Context;
}
let check_context = |context: &mut Context| -> bool {
// Set the CPU id on the context if none specified
if context.cpu_id == None && cpu_id == 0 {
context.cpu_id = Some(cpu_id);
}
// TODO unlock a context if there is new signals to be processed
// the process is on the current CPU, can be run but isn't running.
if context.cpu_id == Some(cpu_id) {
if context.status == Status::Runnable &&! context.running {
return true;
}
}
false
};
// find the next context to be executed
for (pid, context_lock) in contexts.iter() {
if *pid > (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) {
to_ptr = context.deref_mut() as *mut Context;
}
}
}
}
// whether there is no contexts to switch to, we remove the lock and return false
if to_ptr as usize == 0 {
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
// mark the prev context as stopped
(&mut *from_ptr).running = false;
// mark the next context as running
(&mut *to_ptr).running = true;
// store the current context ID
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// HACK: this is a temporary workaround, as arch is only used the the current CPU
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
// Switch to this new context
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
true
}
| {
use core::ops::DerefMut;
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupts::pause();
}
// get current CPU id
let cpu_id = ::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;
{
// get the list of context
let contexts = contexts();
// get the current context
{
let context_lock = contexts.current().expect("context::switch: not inside of context"); | identifier_body |
switch.rs | use arch;
use core::sync::atomic::Ordering;
use context::{contexts, Context, Status, CONTEXT_ID};
/// Switch to the next context.
pub unsafe fn switch() -> bool {
use core::ops::DerefMut;
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupts::pause();
}
// get current CPU id
let cpu_id = ::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;
{
// get the list of context
let contexts = contexts();
| from_ptr = context.deref_mut() as *mut Context;
}
let check_context = |context: &mut Context| -> bool {
// Set the CPU id on the context if none specified
if context.cpu_id == None && cpu_id == 0 {
context.cpu_id = Some(cpu_id);
}
// TODO unlock a context if there is new signals to be processed
// the process is on the current CPU, can be run but isn't running.
if context.cpu_id == Some(cpu_id) {
if context.status == Status::Runnable &&! context.running {
return true;
}
}
false
};
// find the next context to be executed
for (pid, context_lock) in contexts.iter() {
if *pid > (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) {
to_ptr = context.deref_mut() as *mut Context;
}
}
}
}
// whether there is no contexts to switch to, we remove the lock and return false
if to_ptr as usize == 0 {
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
// mark the prev context as stopped
(&mut *from_ptr).running = false;
// mark the next context as running
(&mut *to_ptr).running = true;
// store the current context ID
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// HACK: this is a temporary workaround, as arch is only used the the current CPU
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
// Switch to this new context
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
true
} | // get the current context
{
let context_lock = contexts.current().expect("context::switch: not inside of context");
let mut context = context_lock.write(); | random_line_split |
example.rs | extern mod extra;
use extra::sort;
use std::hashmap::HashMap;
struct School {
priv grades: HashMap<uint, ~[~str]>
}
fn sorted<T: Clone + Ord>(array: &[T]) -> ~[T] {
let mut res = array.iter().map(|v| v.clone()).to_owned_vec();
sort::tim_sort(res);
res
}
impl School {
pub fn new() -> School { | pub fn add(self, grade: uint, student: &str) -> School {
let mut s = self;
s.grades.mangle(
grade,
student,
|_, x| ~[x.into_owned()],
|_, xs, x| xs.push(x.into_owned()));
s
}
pub fn sorted(self) -> ~[(uint, ~[~str])] {
sorted(self.grades.iter().map(|(&grade, students)| {
(grade, sorted(students.clone()))
}).to_owned_vec())
}
pub fn grade(self, grade: uint) -> ~[~str] {
self.grades.find(&grade).map(|&v| sorted(v.to_owned())).unwrap_or(~[])
}
} | School { grades: HashMap::new() }
}
| random_line_split |
example.rs | extern mod extra;
use extra::sort;
use std::hashmap::HashMap;
struct School {
priv grades: HashMap<uint, ~[~str]>
}
fn sorted<T: Clone + Ord>(array: &[T]) -> ~[T] {
let mut res = array.iter().map(|v| v.clone()).to_owned_vec();
sort::tim_sort(res);
res
}
impl School {
pub fn new() -> School {
School { grades: HashMap::new() }
}
pub fn add(self, grade: uint, student: &str) -> School {
let mut s = self;
s.grades.mangle(
grade,
student,
|_, x| ~[x.into_owned()],
|_, xs, x| xs.push(x.into_owned()));
s
}
pub fn sorted(self) -> ~[(uint, ~[~str])] {
sorted(self.grades.iter().map(|(&grade, students)| {
(grade, sorted(students.clone()))
}).to_owned_vec())
}
pub fn | (self, grade: uint) -> ~[~str] {
self.grades.find(&grade).map(|&v| sorted(v.to_owned())).unwrap_or(~[])
}
}
| grade | identifier_name |
example.rs | extern mod extra;
use extra::sort;
use std::hashmap::HashMap;
struct School {
priv grades: HashMap<uint, ~[~str]>
}
fn sorted<T: Clone + Ord>(array: &[T]) -> ~[T] {
let mut res = array.iter().map(|v| v.clone()).to_owned_vec();
sort::tim_sort(res);
res
}
impl School {
pub fn new() -> School {
School { grades: HashMap::new() }
}
pub fn add(self, grade: uint, student: &str) -> School |
pub fn sorted(self) -> ~[(uint, ~[~str])] {
sorted(self.grades.iter().map(|(&grade, students)| {
(grade, sorted(students.clone()))
}).to_owned_vec())
}
pub fn grade(self, grade: uint) -> ~[~str] {
self.grades.find(&grade).map(|&v| sorted(v.to_owned())).unwrap_or(~[])
}
}
| {
let mut s = self;
s.grades.mangle(
grade,
student,
|_, x| ~[x.into_owned()],
|_, xs, x| xs.push(x.into_owned()));
s
} | identifier_body |
pic.rs | /*
* 8259A PIC interface
*
* TODO: some code here is common with PIC implementation in xvm
* make some common definition crate if possible
*/
use arch;
use pio::{inb, outb};
const PIC_MASTER_CMD: u16 = 0x20;
const PIC_MASTER_DATA: u16 = 0x21;
const PIC_SLAVE_CMD: u16 = 0xA0;
const PIC_SLAVE_DATA: u16 = 0xA1;
const ICW1_INIT: u8 = 0x10;
const ICW1_ICW4: u8 = 0x01;
const ICW4_8086: u8 = 0x01;
const PIC_READ_IRR: u8 = 0x0A;
const PIC_READ_ISR: u8 = 0x0B;
const PIC_EOI: u8 = 0x20;
static mut OFFSETS: u16 = 0xFFFF;
pub fn slave_arg(val: u16) -> u8 {
(val >> 8) as u8
}
pub fn master_arg(val: u16) -> u8 {
val as u8
}
pub fn make_arg(master: u8, slave: u8) -> u16 {
master as u16 | ((slave as u16) << 8)
}
/**
* Reset PIC to new vector bases and interrupts masks
*
* \offset Vector offsets for master (LSB) and slave (MSB)
* \mask Interrupt masks for master (LSB) and slave (MSB)
*/
pub fn reset(offset: u16, mask: u16)
{
let iscope = arch::InterruptGuard::default();
unsafe {
// ICW1
outb(PIC_MASTER_CMD, ICW1_INIT | ICW1_ICW4);
outb(PIC_SLAVE_CMD, ICW1_INIT | ICW1_ICW4);
// ICW2 (vector offsets)
outb(PIC_MASTER_DATA, master_arg(offset));
outb(PIC_SLAVE_DATA, slave_arg(offset));
// ICW3 (cascade)
outb(PIC_MASTER_DATA, 4);
outb(PIC_SLAVE_DATA, 2);
// ICW4
outb(PIC_MASTER_DATA, ICW4_8086);
outb(PIC_SLAVE_DATA, ICW4_8086);
// Masks
set_mask(mask);
OFFSETS = offset;
}
}
pub fn mask() -> u16
{
unsafe {
make_arg(inb(PIC_MASTER_DATA), inb(PIC_SLAVE_DATA))
}
}
pub fn set_mask(mask: u16)
{
unsafe {
outb(PIC_MASTER_DATA, master_arg(mask));
outb(PIC_SLAVE_DATA, slave_arg(mask));
}
}
pub fn set_mask2(master: u8, slave: u8)
{
set_mask(make_arg(master, slave));
}
pub fn offset() -> u16
{
unsafe {
OFFSETS
}
}
pub fn master_offset() -> u8
{
master_arg(offset())
}
pub fn slave_offset() -> u8
{
slave_arg(offset())
}
// TODO: move to common utils code
fn set_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val | (1_u8 << bit)
}
// TODO: move to common utils code
fn clear_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val &!(1_u8 << bit)
}
/**
* For a given IRQ number returns interrupt vector for current PIC configuration
*/
pub fn get_interrupt_vector(irq: u8) -> u8
{
if irq < 8 {
master_offset() + irq
} else if irq < 16 {
slave_offset() + irq
} else {
panic!()
}
}
/**
* Mask or unmask and IRQ given its interrupt vector
*/
pub fn mask_vector(vec: u8, is_masked: bool)
{
let mask = mask();
let mut master = master_arg(mask);
let mut slave = slave_arg(mask);
if vec >= master_offset() && vec < (master_offset() + 8) {
if is_masked {
master = set_bit8(master, vec - master_offset());
} else {
master = clear_bit8(master, vec - master_offset());
}
} else if vec >= slave_offset() && vec < (slave_offset() + 8) {
if is_masked {
slave = set_bit8(slave, vec - master_offset());
} else {
slave = clear_bit8(slave, vec - master_offset());
}
}
set_mask2(master, slave);
}
/**
* Send end-of-interrupt for IRQ
*/
pub fn EOI(irq: u8)
{
unsafe {
if irq >= 8 {
outb(PIC_SLAVE_CMD, PIC_EOI);
}
outb(PIC_MASTER_CMD, PIC_EOI);
}
}
| unsafe {
outb(PIC_MASTER_CMD, PIC_READ_ISR);
outb(PIC_SLAVE_CMD, PIC_READ_ISR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
/**
* Read IRR registers
*/
pub fn IRR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_IRR);
outb(PIC_SLAVE_CMD, PIC_READ_IRR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
} | /**
* Read ISR registers
*/
pub fn ISR() -> u16
{ | random_line_split |
pic.rs | /*
* 8259A PIC interface
*
* TODO: some code here is common with PIC implementation in xvm
* make some common definition crate if possible
*/
use arch;
use pio::{inb, outb};
const PIC_MASTER_CMD: u16 = 0x20;
const PIC_MASTER_DATA: u16 = 0x21;
const PIC_SLAVE_CMD: u16 = 0xA0;
const PIC_SLAVE_DATA: u16 = 0xA1;
const ICW1_INIT: u8 = 0x10;
const ICW1_ICW4: u8 = 0x01;
const ICW4_8086: u8 = 0x01;
const PIC_READ_IRR: u8 = 0x0A;
const PIC_READ_ISR: u8 = 0x0B;
const PIC_EOI: u8 = 0x20;
static mut OFFSETS: u16 = 0xFFFF;
pub fn slave_arg(val: u16) -> u8 {
(val >> 8) as u8
}
pub fn master_arg(val: u16) -> u8 {
val as u8
}
pub fn make_arg(master: u8, slave: u8) -> u16 {
master as u16 | ((slave as u16) << 8)
}
/**
* Reset PIC to new vector bases and interrupts masks
*
* \offset Vector offsets for master (LSB) and slave (MSB)
* \mask Interrupt masks for master (LSB) and slave (MSB)
*/
pub fn reset(offset: u16, mask: u16)
{
let iscope = arch::InterruptGuard::default();
unsafe {
// ICW1
outb(PIC_MASTER_CMD, ICW1_INIT | ICW1_ICW4);
outb(PIC_SLAVE_CMD, ICW1_INIT | ICW1_ICW4);
// ICW2 (vector offsets)
outb(PIC_MASTER_DATA, master_arg(offset));
outb(PIC_SLAVE_DATA, slave_arg(offset));
// ICW3 (cascade)
outb(PIC_MASTER_DATA, 4);
outb(PIC_SLAVE_DATA, 2);
// ICW4
outb(PIC_MASTER_DATA, ICW4_8086);
outb(PIC_SLAVE_DATA, ICW4_8086);
// Masks
set_mask(mask);
OFFSETS = offset;
}
}
pub fn mask() -> u16
{
unsafe {
make_arg(inb(PIC_MASTER_DATA), inb(PIC_SLAVE_DATA))
}
}
pub fn set_mask(mask: u16)
{
unsafe {
outb(PIC_MASTER_DATA, master_arg(mask));
outb(PIC_SLAVE_DATA, slave_arg(mask));
}
}
pub fn set_mask2(master: u8, slave: u8)
{
set_mask(make_arg(master, slave));
}
pub fn offset() -> u16
{
unsafe {
OFFSETS
}
}
pub fn master_offset() -> u8
{
master_arg(offset())
}
pub fn slave_offset() -> u8
{
slave_arg(offset())
}
// TODO: move to common utils code
fn set_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val | (1_u8 << bit)
}
// TODO: move to common utils code
fn clear_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val &!(1_u8 << bit)
}
/**
* For a given IRQ number returns interrupt vector for current PIC configuration
*/
pub fn get_interrupt_vector(irq: u8) -> u8
{
if irq < 8 {
master_offset() + irq
} else if irq < 16 {
slave_offset() + irq
} else {
panic!()
}
}
/**
* Mask or unmask and IRQ given its interrupt vector
*/
pub fn mask_vector(vec: u8, is_masked: bool)
{
let mask = mask();
let mut master = master_arg(mask);
let mut slave = slave_arg(mask);
if vec >= master_offset() && vec < (master_offset() + 8) {
if is_masked {
master = set_bit8(master, vec - master_offset());
} else {
master = clear_bit8(master, vec - master_offset());
}
} else if vec >= slave_offset() && vec < (slave_offset() + 8) {
if is_masked {
slave = set_bit8(slave, vec - master_offset());
} else {
slave = clear_bit8(slave, vec - master_offset());
}
}
set_mask2(master, slave);
}
/**
* Send end-of-interrupt for IRQ
*/
pub fn EOI(irq: u8)
{
unsafe {
if irq >= 8 {
outb(PIC_SLAVE_CMD, PIC_EOI);
}
outb(PIC_MASTER_CMD, PIC_EOI);
}
}
/**
* Read ISR registers
*/
pub fn | () -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_ISR);
outb(PIC_SLAVE_CMD, PIC_READ_ISR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
/**
* Read IRR registers
*/
pub fn IRR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_IRR);
outb(PIC_SLAVE_CMD, PIC_READ_IRR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
| ISR | identifier_name |
pic.rs | /*
* 8259A PIC interface
*
* TODO: some code here is common with PIC implementation in xvm
* make some common definition crate if possible
*/
use arch;
use pio::{inb, outb};
const PIC_MASTER_CMD: u16 = 0x20;
const PIC_MASTER_DATA: u16 = 0x21;
const PIC_SLAVE_CMD: u16 = 0xA0;
const PIC_SLAVE_DATA: u16 = 0xA1;
const ICW1_INIT: u8 = 0x10;
const ICW1_ICW4: u8 = 0x01;
const ICW4_8086: u8 = 0x01;
const PIC_READ_IRR: u8 = 0x0A;
const PIC_READ_ISR: u8 = 0x0B;
const PIC_EOI: u8 = 0x20;
static mut OFFSETS: u16 = 0xFFFF;
pub fn slave_arg(val: u16) -> u8 {
(val >> 8) as u8
}
pub fn master_arg(val: u16) -> u8 {
val as u8
}
pub fn make_arg(master: u8, slave: u8) -> u16 {
master as u16 | ((slave as u16) << 8)
}
/**
* Reset PIC to new vector bases and interrupts masks
*
* \offset Vector offsets for master (LSB) and slave (MSB)
* \mask Interrupt masks for master (LSB) and slave (MSB)
*/
pub fn reset(offset: u16, mask: u16)
{
let iscope = arch::InterruptGuard::default();
unsafe {
// ICW1
outb(PIC_MASTER_CMD, ICW1_INIT | ICW1_ICW4);
outb(PIC_SLAVE_CMD, ICW1_INIT | ICW1_ICW4);
// ICW2 (vector offsets)
outb(PIC_MASTER_DATA, master_arg(offset));
outb(PIC_SLAVE_DATA, slave_arg(offset));
// ICW3 (cascade)
outb(PIC_MASTER_DATA, 4);
outb(PIC_SLAVE_DATA, 2);
// ICW4
outb(PIC_MASTER_DATA, ICW4_8086);
outb(PIC_SLAVE_DATA, ICW4_8086);
// Masks
set_mask(mask);
OFFSETS = offset;
}
}
pub fn mask() -> u16
{
unsafe {
make_arg(inb(PIC_MASTER_DATA), inb(PIC_SLAVE_DATA))
}
}
pub fn set_mask(mask: u16)
{
unsafe {
outb(PIC_MASTER_DATA, master_arg(mask));
outb(PIC_SLAVE_DATA, slave_arg(mask));
}
}
pub fn set_mask2(master: u8, slave: u8)
{
set_mask(make_arg(master, slave));
}
pub fn offset() -> u16
{
unsafe {
OFFSETS
}
}
pub fn master_offset() -> u8
|
pub fn slave_offset() -> u8
{
slave_arg(offset())
}
// TODO: move to common utils code
fn set_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val | (1_u8 << bit)
}
// TODO: move to common utils code
fn clear_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val &!(1_u8 << bit)
}
/**
* For a given IRQ number returns interrupt vector for current PIC configuration
*/
pub fn get_interrupt_vector(irq: u8) -> u8
{
if irq < 8 {
master_offset() + irq
} else if irq < 16 {
slave_offset() + irq
} else {
panic!()
}
}
/**
* Mask or unmask and IRQ given its interrupt vector
*/
pub fn mask_vector(vec: u8, is_masked: bool)
{
let mask = mask();
let mut master = master_arg(mask);
let mut slave = slave_arg(mask);
if vec >= master_offset() && vec < (master_offset() + 8) {
if is_masked {
master = set_bit8(master, vec - master_offset());
} else {
master = clear_bit8(master, vec - master_offset());
}
} else if vec >= slave_offset() && vec < (slave_offset() + 8) {
if is_masked {
slave = set_bit8(slave, vec - master_offset());
} else {
slave = clear_bit8(slave, vec - master_offset());
}
}
set_mask2(master, slave);
}
/**
* Send end-of-interrupt for IRQ
*/
pub fn EOI(irq: u8)
{
unsafe {
if irq >= 8 {
outb(PIC_SLAVE_CMD, PIC_EOI);
}
outb(PIC_MASTER_CMD, PIC_EOI);
}
}
/**
* Read ISR registers
*/
pub fn ISR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_ISR);
outb(PIC_SLAVE_CMD, PIC_READ_ISR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
/**
* Read IRR registers
*/
pub fn IRR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_IRR);
outb(PIC_SLAVE_CMD, PIC_READ_IRR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
| {
master_arg(offset())
} | identifier_body |
pic.rs | /*
* 8259A PIC interface
*
* TODO: some code here is common with PIC implementation in xvm
* make some common definition crate if possible
*/
use arch;
use pio::{inb, outb};
const PIC_MASTER_CMD: u16 = 0x20;
const PIC_MASTER_DATA: u16 = 0x21;
const PIC_SLAVE_CMD: u16 = 0xA0;
const PIC_SLAVE_DATA: u16 = 0xA1;
const ICW1_INIT: u8 = 0x10;
const ICW1_ICW4: u8 = 0x01;
const ICW4_8086: u8 = 0x01;
const PIC_READ_IRR: u8 = 0x0A;
const PIC_READ_ISR: u8 = 0x0B;
const PIC_EOI: u8 = 0x20;
static mut OFFSETS: u16 = 0xFFFF;
pub fn slave_arg(val: u16) -> u8 {
(val >> 8) as u8
}
pub fn master_arg(val: u16) -> u8 {
val as u8
}
pub fn make_arg(master: u8, slave: u8) -> u16 {
master as u16 | ((slave as u16) << 8)
}
/**
* Reset PIC to new vector bases and interrupts masks
*
* \offset Vector offsets for master (LSB) and slave (MSB)
* \mask Interrupt masks for master (LSB) and slave (MSB)
*/
pub fn reset(offset: u16, mask: u16)
{
let iscope = arch::InterruptGuard::default();
unsafe {
// ICW1
outb(PIC_MASTER_CMD, ICW1_INIT | ICW1_ICW4);
outb(PIC_SLAVE_CMD, ICW1_INIT | ICW1_ICW4);
// ICW2 (vector offsets)
outb(PIC_MASTER_DATA, master_arg(offset));
outb(PIC_SLAVE_DATA, slave_arg(offset));
// ICW3 (cascade)
outb(PIC_MASTER_DATA, 4);
outb(PIC_SLAVE_DATA, 2);
// ICW4
outb(PIC_MASTER_DATA, ICW4_8086);
outb(PIC_SLAVE_DATA, ICW4_8086);
// Masks
set_mask(mask);
OFFSETS = offset;
}
}
pub fn mask() -> u16
{
unsafe {
make_arg(inb(PIC_MASTER_DATA), inb(PIC_SLAVE_DATA))
}
}
pub fn set_mask(mask: u16)
{
unsafe {
outb(PIC_MASTER_DATA, master_arg(mask));
outb(PIC_SLAVE_DATA, slave_arg(mask));
}
}
pub fn set_mask2(master: u8, slave: u8)
{
set_mask(make_arg(master, slave));
}
pub fn offset() -> u16
{
unsafe {
OFFSETS
}
}
pub fn master_offset() -> u8
{
master_arg(offset())
}
pub fn slave_offset() -> u8
{
slave_arg(offset())
}
// TODO: move to common utils code
fn set_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val | (1_u8 << bit)
}
// TODO: move to common utils code
fn clear_bit8(val: u8, bit: u8) -> u8 {
assert!(bit < 8);
val &!(1_u8 << bit)
}
/**
* For a given IRQ number returns interrupt vector for current PIC configuration
*/
pub fn get_interrupt_vector(irq: u8) -> u8
{
if irq < 8 {
master_offset() + irq
} else if irq < 16 {
slave_offset() + irq
} else |
}
/**
* Mask or unmask and IRQ given its interrupt vector
*/
pub fn mask_vector(vec: u8, is_masked: bool)
{
let mask = mask();
let mut master = master_arg(mask);
let mut slave = slave_arg(mask);
if vec >= master_offset() && vec < (master_offset() + 8) {
if is_masked {
master = set_bit8(master, vec - master_offset());
} else {
master = clear_bit8(master, vec - master_offset());
}
} else if vec >= slave_offset() && vec < (slave_offset() + 8) {
if is_masked {
slave = set_bit8(slave, vec - master_offset());
} else {
slave = clear_bit8(slave, vec - master_offset());
}
}
set_mask2(master, slave);
}
/**
* Send end-of-interrupt for IRQ
*/
pub fn EOI(irq: u8)
{
unsafe {
if irq >= 8 {
outb(PIC_SLAVE_CMD, PIC_EOI);
}
outb(PIC_MASTER_CMD, PIC_EOI);
}
}
/**
* Read ISR registers
*/
pub fn ISR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_ISR);
outb(PIC_SLAVE_CMD, PIC_READ_ISR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
/**
* Read IRR registers
*/
pub fn IRR() -> u16
{
unsafe {
outb(PIC_MASTER_CMD, PIC_READ_IRR);
outb(PIC_SLAVE_CMD, PIC_READ_IRR);
make_arg(inb(PIC_MASTER_CMD), inb(PIC_SLAVE_CMD))
}
}
| {
panic!()
} | conditional_block |
lib.rs | use std::str::FromStr;
#[macro_use]
extern crate nom;
use std::collections::HashMap;
| pub struct Config {
map: HashMap<String, String>,
}
impl Config {
pub fn get(&self, property_name: &str) -> Option<&str> {
self.map.get(property_name).map(|s| s.as_str())
}
fn from_lines(lines: Vec<Line>) -> Config {
let mut map = HashMap::new();
for line in lines {
match line {
Line::KeyValue(key, value) => {
map.insert(key, value);
}
Line::Comment(_) => {}
};
}
Config { map: map }
}
}
impl FromStr for Config {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let lines = try!(get_lines(s));
Ok(Config::from_lines(lines))
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::parser::Line;
#[test]
fn single_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
}
#[test]
fn missing_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("port"), None);
}
#[test]
fn many_key_values() {
let config =
Config::from_lines(vec![Line::KeyValue("hostname".to_string(), "dynamo".to_string()),
Line::KeyValue("port".to_string(), "5153".to_string()),
Line::KeyValue("path".to_string(), "/foo/bar".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
assert_eq!(config.get("port"), Some("5153"));
assert_eq!(config.get("path"), Some("/foo/bar"));
}
#[test]
fn comments() {
let config = Config::from_lines(vec![Line::Comment("anything".to_string())]);
assert_eq!(config.get("hostname"), None);
}
} | mod parser;
use parser::{get_lines, Line};
pub use parser::ParseError;
#[derive(Debug)] | random_line_split |
lib.rs | use std::str::FromStr;
#[macro_use]
extern crate nom;
use std::collections::HashMap;
mod parser;
use parser::{get_lines, Line};
pub use parser::ParseError;
#[derive(Debug)]
pub struct Config {
map: HashMap<String, String>,
}
impl Config {
pub fn get(&self, property_name: &str) -> Option<&str> {
self.map.get(property_name).map(|s| s.as_str())
}
fn from_lines(lines: Vec<Line>) -> Config {
let mut map = HashMap::new();
for line in lines {
match line {
Line::KeyValue(key, value) => {
map.insert(key, value);
}
Line::Comment(_) => {}
};
}
Config { map: map }
}
}
impl FromStr for Config {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let lines = try!(get_lines(s));
Ok(Config::from_lines(lines))
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::parser::Line;
#[test]
fn single_key_value() |
#[test]
fn missing_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("port"), None);
}
#[test]
fn many_key_values() {
let config =
Config::from_lines(vec![Line::KeyValue("hostname".to_string(), "dynamo".to_string()),
Line::KeyValue("port".to_string(), "5153".to_string()),
Line::KeyValue("path".to_string(), "/foo/bar".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
assert_eq!(config.get("port"), Some("5153"));
assert_eq!(config.get("path"), Some("/foo/bar"));
}
#[test]
fn comments() {
let config = Config::from_lines(vec![Line::Comment("anything".to_string())]);
assert_eq!(config.get("hostname"), None);
}
}
| {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
} | identifier_body |
lib.rs | use std::str::FromStr;
#[macro_use]
extern crate nom;
use std::collections::HashMap;
mod parser;
use parser::{get_lines, Line};
pub use parser::ParseError;
#[derive(Debug)]
pub struct Config {
map: HashMap<String, String>,
}
impl Config {
pub fn get(&self, property_name: &str) -> Option<&str> {
self.map.get(property_name).map(|s| s.as_str())
}
fn from_lines(lines: Vec<Line>) -> Config {
let mut map = HashMap::new();
for line in lines {
match line {
Line::KeyValue(key, value) => |
Line::Comment(_) => {}
};
}
Config { map: map }
}
}
impl FromStr for Config {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let lines = try!(get_lines(s));
Ok(Config::from_lines(lines))
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::parser::Line;
#[test]
fn single_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
}
#[test]
fn missing_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("port"), None);
}
#[test]
fn many_key_values() {
let config =
Config::from_lines(vec![Line::KeyValue("hostname".to_string(), "dynamo".to_string()),
Line::KeyValue("port".to_string(), "5153".to_string()),
Line::KeyValue("path".to_string(), "/foo/bar".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
assert_eq!(config.get("port"), Some("5153"));
assert_eq!(config.get("path"), Some("/foo/bar"));
}
#[test]
fn comments() {
let config = Config::from_lines(vec![Line::Comment("anything".to_string())]);
assert_eq!(config.get("hostname"), None);
}
}
| {
map.insert(key, value);
} | conditional_block |
lib.rs | use std::str::FromStr;
#[macro_use]
extern crate nom;
use std::collections::HashMap;
mod parser;
use parser::{get_lines, Line};
pub use parser::ParseError;
#[derive(Debug)]
pub struct Config {
map: HashMap<String, String>,
}
impl Config {
pub fn get(&self, property_name: &str) -> Option<&str> {
self.map.get(property_name).map(|s| s.as_str())
}
fn from_lines(lines: Vec<Line>) -> Config {
let mut map = HashMap::new();
for line in lines {
match line {
Line::KeyValue(key, value) => {
map.insert(key, value);
}
Line::Comment(_) => {}
};
}
Config { map: map }
}
}
impl FromStr for Config {
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let lines = try!(get_lines(s));
Ok(Config::from_lines(lines))
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::parser::Line;
#[test]
fn single_key_value() {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
}
#[test]
fn | () {
let config = Config::from_lines(vec![Line::KeyValue("hostname".to_string(),
"dynamo".to_string())]);
assert_eq!(config.get("port"), None);
}
#[test]
fn many_key_values() {
let config =
Config::from_lines(vec![Line::KeyValue("hostname".to_string(), "dynamo".to_string()),
Line::KeyValue("port".to_string(), "5153".to_string()),
Line::KeyValue("path".to_string(), "/foo/bar".to_string())]);
assert_eq!(config.get("hostname"), Some("dynamo"));
assert_eq!(config.get("port"), Some("5153"));
assert_eq!(config.get("path"), Some("/foo/bar"));
}
#[test]
fn comments() {
let config = Config::from_lines(vec![Line::Comment("anything".to_string())]);
assert_eq!(config.get("hostname"), None);
}
}
| missing_key_value | identifier_name |
size_of.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use script::test::size_of;
// Macro so that we can stringify type names
// I'd really prefer the tests themselves to be run at plugin time,
// however rustc::middle doesn't have access to the full type data
macro_rules! sizeof_checker (
($testname: ident, $t: ident, $known_size: expr) => (
#[test]
fn $testname() {
let new = size_of::$t();
let old = $known_size;
if new < old {
panic!("Your changes have decreased the stack size of commonly used DOM struct {} from {} to {}. \
Good work! Please update the size in tests/unit/script/size_of.rs.",
stringify!($t), old, new)
} else if new > old {
panic!("Your changes have increased the stack size of commonly used DOM struct {} from {} to {}. \
These structs are present in large quantities in the DOM, and increasing the size \
may dramatically affect our memory footprint. Please consider choosing a design which \
avoids this increase. If you feel that the increase is necessary, \ | }
});
);
// Update the sizes here
sizeof_checker!(size_event_target, EventTarget, 48);
sizeof_checker!(size_node, Node, 184);
sizeof_checker!(size_element, Element, 360);
sizeof_checker!(size_htmlelement, HTMLElement, 376);
sizeof_checker!(size_div, HTMLDivElement, 376);
sizeof_checker!(size_span, HTMLSpanElement, 376);
sizeof_checker!(size_text, Text, 216);
sizeof_checker!(size_characterdata, CharacterData, 216); | update to the new size in tests/unit/script/size_of.rs.",
stringify!($t), old, new) | random_line_split |
mod.rs | use token;
use roolang_regex;
use common::ParseError as ParseError;
mod test;
#[derive(Clone)]
#[derive(PartialEq)]
pub enum Constant {
IntegerConst {value: i32},
BooleanConst {value: bool},
CharSeqConst {value: String},
DecimalConst {value: f64}
}
pub fn get_constant(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let first_token = &tokens[start].text;
if roolang_regex::is_numeric(first_token) {
return as_number(&tokens, start, false);
} else if first_token == "-" {
if tokens.len() < start + 1 {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start + 1].text) {
return as_number(&tokens, start + 1, true);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting a number");
}
} else if roolang_regex::is_boolean(first_token) {
return as_boolean(&tokens, start);
} else if roolang_regex::is_string(first_token) {
return as_string(&tokens, start);
}
return bad_token(&tokens[start], "Unexcpected token, expecting Int, Dec, String, Bool, or type");
}
fn as_number(tokens: &Vec<token::Token>, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) |
fn as_decimal_number(tokens: &Vec<token::Token>, whole_number: String, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let mut number = whole_number.clone();
number.push_str(".");
if tokens.len() < start {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start].text) {
number.push_str(&tokens[start].text);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting numeric value");
}
let val = match number.parse::<f64>() {
Ok(n) => if negative {n * -1.0} else {n},
Err(_) => 0.0
};
return (Ok(Constant::DecimalConst {value: val}), start + 1)
}
fn as_boolean(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
return (Ok(Constant::BooleanConst {value: &tokens[start].text == "true"}), start + 1);
}
fn as_string(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let mut val = tokens[start].text.clone();
val.remove(tokens[start].text.len() - 1);
val.remove(0);
return (Ok(Constant::CharSeqConst{value: val.clone()}), start + 1);
}
fn bad_token(token: &token::Token, msg: &str) -> (Result<Constant, ParseError>, usize) {
return (Err(ParseError::new(&token, msg)), 0);
}
| {
let number = tokens[start].text.clone();
if tokens.len() > start + 1 && &tokens[start + 1].text == "." {
return as_decimal_number(&tokens, number, start + 2, negative);
}
let val = match number.parse::<i32>() {
Ok(n) => if negative {n * -1} else {n},
Err(_) => 0
};
return (Ok(Constant::IntegerConst {value: val}), start + 1);
} | identifier_body |
mod.rs | use token;
use roolang_regex;
use common::ParseError as ParseError;
mod test;
#[derive(Clone)]
#[derive(PartialEq)]
pub enum Constant {
IntegerConst {value: i32},
BooleanConst {value: bool},
CharSeqConst {value: String},
DecimalConst {value: f64}
}
pub fn get_constant(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let first_token = &tokens[start].text;
if roolang_regex::is_numeric(first_token) {
return as_number(&tokens, start, false);
} else if first_token == "-" {
if tokens.len() < start + 1 {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start + 1].text) {
return as_number(&tokens, start + 1, true);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting a number");
}
} else if roolang_regex::is_boolean(first_token) {
return as_boolean(&tokens, start);
} else if roolang_regex::is_string(first_token) {
return as_string(&tokens, start);
}
return bad_token(&tokens[start], "Unexcpected token, expecting Int, Dec, String, Bool, or type");
}
fn as_number(tokens: &Vec<token::Token>, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let number = tokens[start].text.clone();
if tokens.len() > start + 1 && &tokens[start + 1].text == "." {
return as_decimal_number(&tokens, number, start + 2, negative);
}
let val = match number.parse::<i32>() {
Ok(n) => if negative {n * -1} else {n},
Err(_) => 0
};
return (Ok(Constant::IntegerConst {value: val}), start + 1);
}
fn as_decimal_number(tokens: &Vec<token::Token>, whole_number: String, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let mut number = whole_number.clone();
number.push_str(".");
if tokens.len() < start {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start].text) {
number.push_str(&tokens[start].text);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting numeric value");
}
let val = match number.parse::<f64>() {
Ok(n) => if negative {n * -1.0} else {n},
Err(_) => 0.0
};
return (Ok(Constant::DecimalConst {value: val}), start + 1)
}
fn | (tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
return (Ok(Constant::BooleanConst {value: &tokens[start].text == "true"}), start + 1);
}
fn as_string(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let mut val = tokens[start].text.clone();
val.remove(tokens[start].text.len() - 1);
val.remove(0);
return (Ok(Constant::CharSeqConst{value: val.clone()}), start + 1);
}
fn bad_token(token: &token::Token, msg: &str) -> (Result<Constant, ParseError>, usize) {
return (Err(ParseError::new(&token, msg)), 0);
}
| as_boolean | identifier_name |
mod.rs | use token;
use roolang_regex;
use common::ParseError as ParseError;
mod test;
#[derive(Clone)]
#[derive(PartialEq)]
pub enum Constant {
IntegerConst {value: i32},
BooleanConst {value: bool},
CharSeqConst {value: String},
DecimalConst {value: f64}
}
pub fn get_constant(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let first_token = &tokens[start].text;
if roolang_regex::is_numeric(first_token) {
return as_number(&tokens, start, false);
} else if first_token == "-" {
if tokens.len() < start + 1 {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start + 1].text) {
return as_number(&tokens, start + 1, true);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting a number");
}
} else if roolang_regex::is_boolean(first_token) {
return as_boolean(&tokens, start);
} else if roolang_regex::is_string(first_token) {
return as_string(&tokens, start);
}
return bad_token(&tokens[start], "Unexcpected token, expecting Int, Dec, String, Bool, or type");
}
fn as_number(tokens: &Vec<token::Token>, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let number = tokens[start].text.clone();
if tokens.len() > start + 1 && &tokens[start + 1].text == "." {
return as_decimal_number(&tokens, number, start + 2, negative);
}
let val = match number.parse::<i32>() {
Ok(n) => if negative {n * -1} else {n},
Err(_) => 0
};
return (Ok(Constant::IntegerConst {value: val}), start + 1);
}
fn as_decimal_number(tokens: &Vec<token::Token>, whole_number: String, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let mut number = whole_number.clone();
number.push_str(".");
if tokens.len() < start {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start].text) {
number.push_str(&tokens[start].text);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting numeric value");
}
let val = match number.parse::<f64>() {
Ok(n) => if negative {n * -1.0} else {n},
Err(_) => 0.0
};
return (Ok(Constant::DecimalConst {value: val}), start + 1)
}
fn as_boolean(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) { |
fn as_string(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let mut val = tokens[start].text.clone();
val.remove(tokens[start].text.len() - 1);
val.remove(0);
return (Ok(Constant::CharSeqConst{value: val.clone()}), start + 1);
}
fn bad_token(token: &token::Token, msg: &str) -> (Result<Constant, ParseError>, usize) {
return (Err(ParseError::new(&token, msg)), 0);
} | return (Ok(Constant::BooleanConst {value: &tokens[start].text == "true"}), start + 1);
} | random_line_split |
mod.rs | use token;
use roolang_regex;
use common::ParseError as ParseError;
mod test;
#[derive(Clone)]
#[derive(PartialEq)]
pub enum Constant {
IntegerConst {value: i32},
BooleanConst {value: bool},
CharSeqConst {value: String},
DecimalConst {value: f64}
}
pub fn get_constant(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let first_token = &tokens[start].text;
if roolang_regex::is_numeric(first_token) {
return as_number(&tokens, start, false);
} else if first_token == "-" {
if tokens.len() < start + 1 {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start + 1].text) {
return as_number(&tokens, start + 1, true);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting a number");
}
} else if roolang_regex::is_boolean(first_token) {
return as_boolean(&tokens, start);
} else if roolang_regex::is_string(first_token) {
return as_string(&tokens, start);
}
return bad_token(&tokens[start], "Unexcpected token, expecting Int, Dec, String, Bool, or type");
}
fn as_number(tokens: &Vec<token::Token>, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let number = tokens[start].text.clone();
if tokens.len() > start + 1 && &tokens[start + 1].text == "." {
return as_decimal_number(&tokens, number, start + 2, negative);
}
let val = match number.parse::<i32>() {
Ok(n) => if negative {n * -1} else {n},
Err(_) => 0
};
return (Ok(Constant::IntegerConst {value: val}), start + 1);
}
fn as_decimal_number(tokens: &Vec<token::Token>, whole_number: String, start: usize, negative: bool) -> (Result<Constant, ParseError>, usize) {
let mut number = whole_number.clone();
number.push_str(".");
if tokens.len() < start {
return bad_token(&tokens[start], "Unexcpected end");
} else if roolang_regex::is_numeric(&tokens[start].text) {
number.push_str(&tokens[start].text);
} else {
return bad_token(&tokens[start], "Unexcpected token, expecting numeric value");
}
let val = match number.parse::<f64>() {
Ok(n) => if negative | else {n},
Err(_) => 0.0
};
return (Ok(Constant::DecimalConst {value: val}), start + 1)
}
fn as_boolean(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
return (Ok(Constant::BooleanConst {value: &tokens[start].text == "true"}), start + 1);
}
fn as_string(tokens: &Vec<token::Token>, start: usize) -> (Result<Constant, ParseError>, usize) {
let mut val = tokens[start].text.clone();
val.remove(tokens[start].text.len() - 1);
val.remove(0);
return (Ok(Constant::CharSeqConst{value: val.clone()}), start + 1);
}
fn bad_token(token: &token::Token, msg: &str) -> (Result<Constant, ParseError>, usize) {
return (Err(ParseError::new(&token, msg)), 0);
}
| {n * -1.0} | conditional_block |
gpushadermodule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::GPUShaderModuleBinding::GPUShaderModuleMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use webgpu::WebGPUShaderModule;
#[dom_struct]
pub struct GPUShaderModule {
reflector_: Reflector,
label: DomRefCell<Option<DOMString>>,
shader_module: WebGPUShaderModule,
}
impl GPUShaderModule {
fn new_inherited(shader_module: WebGPUShaderModule) -> GPUShaderModule {
Self {
reflector_: Reflector::new(),
label: DomRefCell::new(None),
shader_module,
}
}
pub fn new(
global: &GlobalScope,
shader_module: WebGPUShaderModule,
) -> DomRoot<GPUShaderModule> {
reflect_dom_object(
Box::new(GPUShaderModule::new_inherited(shader_module)),
global,
)
}
}
impl GPUShaderModule {
pub fn id(&self) -> WebGPUShaderModule {
self.shader_module
}
}
impl GPUShaderModuleMethods for GPUShaderModule {
/// https://gpuweb.github.io/gpuweb/#dom-gpuobjectbase-label
fn GetLabel(&self) -> Option<DOMString> {
self.label.borrow().clone()
}
/// https://gpuweb.github.io/gpuweb/#dom-gpuobjectbase-label
fn | (&self, value: Option<DOMString>) {
*self.label.borrow_mut() = value;
}
}
| SetLabel | identifier_name |
gpushadermodule.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::GPUShaderModuleBinding::GPUShaderModuleMethods;
use crate::dom::bindings::reflector::{reflect_dom_object, Reflector};
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use webgpu::WebGPUShaderModule;
#[dom_struct]
pub struct GPUShaderModule {
reflector_: Reflector,
label: DomRefCell<Option<DOMString>>,
shader_module: WebGPUShaderModule,
}
impl GPUShaderModule {
fn new_inherited(shader_module: WebGPUShaderModule) -> GPUShaderModule {
Self {
reflector_: Reflector::new(),
label: DomRefCell::new(None),
shader_module,
}
}
pub fn new(
global: &GlobalScope,
shader_module: WebGPUShaderModule,
) -> DomRoot<GPUShaderModule> {
reflect_dom_object(
Box::new(GPUShaderModule::new_inherited(shader_module)),
global, | pub fn id(&self) -> WebGPUShaderModule {
self.shader_module
}
}
impl GPUShaderModuleMethods for GPUShaderModule {
/// https://gpuweb.github.io/gpuweb/#dom-gpuobjectbase-label
fn GetLabel(&self) -> Option<DOMString> {
self.label.borrow().clone()
}
/// https://gpuweb.github.io/gpuweb/#dom-gpuobjectbase-label
fn SetLabel(&self, value: Option<DOMString>) {
*self.label.borrow_mut() = value;
}
} | )
}
}
impl GPUShaderModule { | random_line_split |
iterator.rs | use crate::ops::{Decrement, Increment, Indirection};
use crate::{CppBox, CppDeletable, Ref};
use std::os::raw::c_char;
/// `Iterator` and `DoubleEndedIterator` backed by C++ iterators.
///
/// This object is produced by `IntoIterator` implementations on pointer types
/// (`&CppBox`, `&mut CppBox`, `Ptr`, `Ref`). You can also use
/// `cpp_iter` function to construct it manually from two C++ iterator objects.
pub struct CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
begin: CppBox<T1>,
end: CppBox<T2>,
}
/// Constructs a Rust-style iterator from C++ iterators pointing to begin and end
/// of the collection.
///
/// ### Safety
///
/// `begin` and `end` must be valid. It's not possible to make any guarantees about safety, since
/// `CppIterator` will call arbitrary C++ library code when used.
pub unsafe fn cpp_iter<T1, T2>(begin: CppBox<T1>, end: CppBox<T2>) -> CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
CppIterator { begin, end }
}
impl<T1, T2> Iterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable,
{
type Item = <T1 as Indirection>::Output;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.begin.as_mut_raw_ptr();
let value = inner.indirection();
let inner = &mut *self.begin.as_mut_raw_ptr();
inner.inc();
Some(value)
}
}
}
}
impl<T1, T2> DoubleEndedIterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable + Decrement + Indirection<Output = <T1 as Indirection>::Output>,
{
fn next_back(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.end.as_mut_raw_ptr();
inner.dec();
let inner = &mut *self.end.as_mut_raw_ptr();
let value = inner.indirection();
Some(value)
}
}
}
}
/// A convenience trait that provides `end_ptr()` method for slices.
pub trait EndPtr {
/// Type of item.
type Item;
/// Returns pointer to the end of the slice (past the last element).
fn end_ptr(&self) -> *const Self::Item;
}
impl<'a, T> EndPtr for &'a [T] {
type Item = T;
fn | (&self) -> *const T {
unsafe { self.as_ptr().add(self.len()) }
}
}
impl<'a> EndPtr for &'a str {
type Item = c_char;
fn end_ptr(&self) -> *const c_char {
unsafe { self.as_ptr().add(self.len()) as *const c_char }
}
}
| end_ptr | identifier_name |
iterator.rs | use crate::ops::{Decrement, Increment, Indirection};
use crate::{CppBox, CppDeletable, Ref};
use std::os::raw::c_char;
/// `Iterator` and `DoubleEndedIterator` backed by C++ iterators.
///
/// This object is produced by `IntoIterator` implementations on pointer types
/// (`&CppBox`, `&mut CppBox`, `Ptr`, `Ref`). You can also use
/// `cpp_iter` function to construct it manually from two C++ iterator objects.
pub struct CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
begin: CppBox<T1>,
end: CppBox<T2>,
}
/// Constructs a Rust-style iterator from C++ iterators pointing to begin and end
/// of the collection.
///
/// ### Safety
///
/// `begin` and `end` must be valid. It's not possible to make any guarantees about safety, since
/// `CppIterator` will call arbitrary C++ library code when used.
pub unsafe fn cpp_iter<T1, T2>(begin: CppBox<T1>, end: CppBox<T2>) -> CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
CppIterator { begin, end }
}
impl<T1, T2> Iterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable,
{
type Item = <T1 as Indirection>::Output;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.begin.as_mut_raw_ptr();
let value = inner.indirection();
let inner = &mut *self.begin.as_mut_raw_ptr();
inner.inc();
Some(value)
}
}
}
}
impl<T1, T2> DoubleEndedIterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable + Decrement + Indirection<Output = <T1 as Indirection>::Output>,
{
fn next_back(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else |
}
}
}
/// A convenience trait that provides `end_ptr()` method for slices.
pub trait EndPtr {
/// Type of item.
type Item;
/// Returns pointer to the end of the slice (past the last element).
fn end_ptr(&self) -> *const Self::Item;
}
impl<'a, T> EndPtr for &'a [T] {
type Item = T;
fn end_ptr(&self) -> *const T {
unsafe { self.as_ptr().add(self.len()) }
}
}
impl<'a> EndPtr for &'a str {
type Item = c_char;
fn end_ptr(&self) -> *const c_char {
unsafe { self.as_ptr().add(self.len()) as *const c_char }
}
}
| {
let inner = &mut *self.end.as_mut_raw_ptr();
inner.dec();
let inner = &mut *self.end.as_mut_raw_ptr();
let value = inner.indirection();
Some(value)
} | conditional_block |
iterator.rs | use crate::ops::{Decrement, Increment, Indirection};
use crate::{CppBox, CppDeletable, Ref};
use std::os::raw::c_char;
/// `Iterator` and `DoubleEndedIterator` backed by C++ iterators.
///
/// This object is produced by `IntoIterator` implementations on pointer types
/// (`&CppBox`, `&mut CppBox`, `Ptr`, `Ref`). You can also use
/// `cpp_iter` function to construct it manually from two C++ iterator objects.
pub struct CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
begin: CppBox<T1>,
end: CppBox<T2>,
}
/// Constructs a Rust-style iterator from C++ iterators pointing to begin and end
/// of the collection.
///
/// ### Safety
///
/// `begin` and `end` must be valid. It's not possible to make any guarantees about safety, since
/// `CppIterator` will call arbitrary C++ library code when used.
pub unsafe fn cpp_iter<T1, T2>(begin: CppBox<T1>, end: CppBox<T2>) -> CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
CppIterator { begin, end }
}
impl<T1, T2> Iterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable,
{
type Item = <T1 as Indirection>::Output;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.begin.as_mut_raw_ptr();
let value = inner.indirection();
let inner = &mut *self.begin.as_mut_raw_ptr();
inner.inc();
Some(value)
}
}
}
}
impl<T1, T2> DoubleEndedIterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable + Decrement + Indirection<Output = <T1 as Indirection>::Output>,
{ | unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.end.as_mut_raw_ptr();
inner.dec();
let inner = &mut *self.end.as_mut_raw_ptr();
let value = inner.indirection();
Some(value)
}
}
}
}
/// A convenience trait that provides `end_ptr()` method for slices.
pub trait EndPtr {
/// Type of item.
type Item;
/// Returns pointer to the end of the slice (past the last element).
fn end_ptr(&self) -> *const Self::Item;
}
impl<'a, T> EndPtr for &'a [T] {
type Item = T;
fn end_ptr(&self) -> *const T {
unsafe { self.as_ptr().add(self.len()) }
}
}
impl<'a> EndPtr for &'a str {
type Item = c_char;
fn end_ptr(&self) -> *const c_char {
unsafe { self.as_ptr().add(self.len()) as *const c_char }
}
} | fn next_back(&mut self) -> Option<Self::Item> { | random_line_split |
iterator.rs | use crate::ops::{Decrement, Increment, Indirection};
use crate::{CppBox, CppDeletable, Ref};
use std::os::raw::c_char;
/// `Iterator` and `DoubleEndedIterator` backed by C++ iterators.
///
/// This object is produced by `IntoIterator` implementations on pointer types
/// (`&CppBox`, `&mut CppBox`, `Ptr`, `Ref`). You can also use
/// `cpp_iter` function to construct it manually from two C++ iterator objects.
pub struct CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
{
begin: CppBox<T1>,
end: CppBox<T2>,
}
/// Constructs a Rust-style iterator from C++ iterators pointing to begin and end
/// of the collection.
///
/// ### Safety
///
/// `begin` and `end` must be valid. It's not possible to make any guarantees about safety, since
/// `CppIterator` will call arbitrary C++ library code when used.
pub unsafe fn cpp_iter<T1, T2>(begin: CppBox<T1>, end: CppBox<T2>) -> CppIterator<T1, T2>
where
T1: CppDeletable,
T2: CppDeletable,
|
impl<T1, T2> Iterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable,
{
type Item = <T1 as Indirection>::Output;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
let inner = &mut *self.begin.as_mut_raw_ptr();
let value = inner.indirection();
let inner = &mut *self.begin.as_mut_raw_ptr();
inner.inc();
Some(value)
}
}
}
}
impl<T1, T2> DoubleEndedIterator for CppIterator<T1, T2>
where
T1: CppDeletable + PartialEq<Ref<T2>> + Indirection + Increment,
T2: CppDeletable + Decrement + Indirection<Output = <T1 as Indirection>::Output>,
{
fn next_back(&mut self) -> Option<Self::Item> {
unsafe {
if self.begin == self.end.as_ref() {
None
} else {
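// `end` points one past the last element, so decrement it first and then dereference.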
let inner = &mut *self.end.as_mut_raw_ptr();
inner.dec();
let inner = &mut *self.end.as_mut_raw_ptr();
let value = inner.indirection();
Some(value)
}
}
}
}
/// A convenience trait that provides an `end_ptr()` method for slices.
pub trait EndPtr {
/// Type of item.
type Item;
/// Returns a pointer to the end of the slice (past the last element).
fn end_ptr(&self) -> *const Self::Item;
}
impl<'a, T> EndPtr for &'a [T] {
type Item = T;
fn end_ptr(&self) -> *const T {
unsafe { self.as_ptr().add(self.len()) }
}
}
impl<'a> EndPtr for &'a str {
type Item = c_char;
fn end_ptr(&self) -> *const c_char {
unsafe { self.as_ptr().add(self.len()) as *const c_char }
}
}
| {
CppIterator { begin, end }
} | identifier_body |
shootout-pfib.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
A parallel version of Fibonacci numbers.
This version is meant mostly as a way of stressing and benchmarking
the task system. It supports a lot of old command-line arguments to
control how it runs.
*/
extern crate getopts;
extern crate time;
use std::os;
use std::result::{Ok, Err};
use std::task;
use std::uint;
fn fib(n: int) -> int {
fn pfib(tx: &Sender<int>, n: int) {
if n == 0 {
tx.send(0);
} else if n <= 2 {
tx.send(1);
} else {
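// Spawn subtasks for fib(n-1) and fib(n-2); both send into the same channel, so summing the two received values gives fib(n).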
let (tx1, rx) = channel();
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 1));
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 2));
tx.send(rx.recv() + rx.recv());
}
}
let (tx, rx) = channel();
spawn(proc() pfib(&tx, n) );
rx.recv()
}
struct | {
stress: bool
}
fn parse_opts(argv: Vec<String> ) -> Config {
let opts = vec!(getopts::optflag("", "stress", ""));
let argv = argv.iter().map(|x| x.to_string()).collect::<Vec<_>>();
let opt_args = argv.slice(1, argv.len());
match getopts::getopts(opt_args, opts.as_slice()) {
Ok(ref m) => {
return Config {stress: m.opt_present("stress")}
}
Err(_) => { fail!(); }
}
}
fn stress_task(id: int) {
let mut i = 0i;
loop {
let n = 15i;
assert_eq!(fib(n), fib(n));
i += 1;
println!("{}: Completed {} iterations", id, i);
}
}
fn stress(num_tasks: int) {
let mut results = Vec::new();
for i in range(0, num_tasks) {
results.push(task::try_future(proc() {
stress_task(i);
}));
}
for r in results.move_iter() {
r.unwrap();
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_string(), "20".to_string())
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
args.move_iter().map(|x| x.to_string()).collect()
};
let opts = parse_opts(args.clone());
if opts.stress {
stress(2);
} else {
let max = uint::parse_bytes(args.get(1).as_bytes(), 10u).unwrap() as
int;
let num_trials = 10;
for n in range(1, max + 1) {
for _ in range(0u, num_trials) {
let start = time::precise_time_ns();
let fibn = fib(n);
let stop = time::precise_time_ns();
let elapsed = stop - start;
println!("{}\t{}\t{}", n, fibn, elapsed.to_string());
}
}
}
}
| Config | identifier_name |
shootout-pfib.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
A parallel version of Fibonacci numbers.
This version is meant mostly as a way of stressing and benchmarking
the task system. It supports a lot of old command-line arguments to
control how it runs.
*/
extern crate getopts;
extern crate time;
use std::os;
use std::result::{Ok, Err};
use std::task;
use std::uint;
fn fib(n: int) -> int {
fn pfib(tx: &Sender<int>, n: int) {
if n == 0 {
tx.send(0);
} else if n <= 2 {
tx.send(1);
} else {
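// Spawn subtasks for fib(n-1) and fib(n-2); both send into the same channel, so summing the two received values gives fib(n).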
let (tx1, rx) = channel();
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 1));
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 2));
tx.send(rx.recv() + rx.recv());
}
}
let (tx, rx) = channel();
spawn(proc() pfib(&tx, n) );
rx.recv()
}
struct Config {
stress: bool
}
fn parse_opts(argv: Vec<String> ) -> Config {
let opts = vec!(getopts::optflag("", "stress", ""));
let argv = argv.iter().map(|x| x.to_string()).collect::<Vec<_>>();
let opt_args = argv.slice(1, argv.len());
match getopts::getopts(opt_args, opts.as_slice()) {
Ok(ref m) => {
return Config {stress: m.opt_present("stress")}
}
Err(_) => { fail!(); }
}
}
fn stress_task(id: int) {
let mut i = 0i;
loop {
let n = 15i;
assert_eq!(fib(n), fib(n));
i += 1;
println!("{}: Completed {} iterations", id, i);
}
}
fn stress(num_tasks: int) |
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_string(), "20".to_string())
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
args.move_iter().map(|x| x.to_string()).collect()
};
let opts = parse_opts(args.clone());
if opts.stress {
stress(2);
} else {
let max = uint::parse_bytes(args.get(1).as_bytes(), 10u).unwrap() as
int;
let num_trials = 10;
for n in range(1, max + 1) {
for _ in range(0u, num_trials) {
let start = time::precise_time_ns();
let fibn = fib(n);
let stop = time::precise_time_ns();
let elapsed = stop - start;
println!("{}\t{}\t{}", n, fibn, elapsed.to_string());
}
}
}
}
| {
let mut results = Vec::new();
for i in range(0, num_tasks) {
results.push(task::try_future(proc() {
stress_task(i);
}));
}
for r in results.move_iter() {
r.unwrap();
}
} | identifier_body |
shootout-pfib.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
A parallel version of Fibonacci numbers.
This version is meant mostly as a way of stressing and benchmarking
the task system. It supports a lot of old command-line arguments to
control how it runs.
*/
extern crate getopts;
extern crate time;
use std::os;
use std::result::{Ok, Err};
use std::task;
use std::uint;
fn fib(n: int) -> int {
fn pfib(tx: &Sender<int>, n: int) {
if n == 0 {
tx.send(0);
} else if n <= 2 {
tx.send(1);
} else {
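// Spawn subtasks for fib(n-1) and fib(n-2); both send into the same channel, so summing the two received values gives fib(n).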
let (tx1, rx) = channel();
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 1));
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 2));
tx.send(rx.recv() + rx.recv());
}
}
let (tx, rx) = channel();
spawn(proc() pfib(&tx, n) );
rx.recv()
}
struct Config {
stress: bool
}
fn parse_opts(argv: Vec<String> ) -> Config {
let opts = vec!(getopts::optflag("", "stress", ""));
let argv = argv.iter().map(|x| x.to_string()).collect::<Vec<_>>();
let opt_args = argv.slice(1, argv.len());
match getopts::getopts(opt_args, opts.as_slice()) {
Ok(ref m) => |
Err(_) => { fail!(); }
}
}
fn stress_task(id: int) {
let mut i = 0i;
loop {
let n = 15i;
assert_eq!(fib(n), fib(n));
i += 1;
println!("{}: Completed {} iterations", id, i);
}
}
fn stress(num_tasks: int) {
let mut results = Vec::new();
for i in range(0, num_tasks) {
results.push(task::try_future(proc() {
stress_task(i);
}));
}
for r in results.move_iter() {
r.unwrap();
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_string(), "20".to_string())
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
args.move_iter().map(|x| x.to_string()).collect()
};
let opts = parse_opts(args.clone());
if opts.stress {
stress(2);
} else {
let max = uint::parse_bytes(args.get(1).as_bytes(), 10u).unwrap() as
int;
let num_trials = 10;
for n in range(1, max + 1) {
for _ in range(0u, num_trials) {
let start = time::precise_time_ns();
let fibn = fib(n);
let stop = time::precise_time_ns();
let elapsed = stop - start;
println!("{}\t{}\t{}", n, fibn, elapsed.to_string());
}
}
}
}
| {
return Config {stress: m.opt_present("stress")}
} | conditional_block |
shootout-pfib.rs | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
A parallel version of Fibonacci numbers.
This version is meant mostly as a way of stressing and benchmarking
the task system. It supports a lot of old command-line arguments to
control how it runs.
*/
extern crate getopts;
extern crate time;
use std::os;
use std::result::{Ok, Err};
use std::task;
use std::uint;
fn fib(n: int) -> int {
fn pfib(tx: &Sender<int>, n: int) {
if n == 0 {
tx.send(0);
} else if n <= 2 {
tx.send(1);
} else {
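// Spawn subtasks for fib(n-1) and fib(n-2); both send into the same channel, so summing the two received values gives fib(n).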
let (tx1, rx) = channel();
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 1));
let tx2 = tx1.clone();
task::spawn(proc() pfib(&tx2, n - 2));
tx.send(rx.recv() + rx.recv());
}
}
let (tx, rx) = channel();
spawn(proc() pfib(&tx, n) );
rx.recv()
}
struct Config {
stress: bool
}
fn parse_opts(argv: Vec<String> ) -> Config {
let opts = vec!(getopts::optflag("", "stress", ""));
let argv = argv.iter().map(|x| x.to_string()).collect::<Vec<_>>();
let opt_args = argv.slice(1, argv.len());
match getopts::getopts(opt_args, opts.as_slice()) {
Ok(ref m) => {
return Config {stress: m.opt_present("stress")}
}
Err(_) => { fail!(); }
}
}
fn stress_task(id: int) {
let mut i = 0i;
loop {
let n = 15i;
assert_eq!(fib(n), fib(n));
i += 1;
println!("{}: Completed {} iterations", id, i);
}
}
fn stress(num_tasks: int) {
let mut results = Vec::new();
for i in range(0, num_tasks) {
results.push(task::try_future(proc() {
stress_task(i);
}));
}
for r in results.move_iter() {
r.unwrap();
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_string(), "20".to_string())
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
args.move_iter().map(|x| x.to_string()).collect()
};
let opts = parse_opts(args.clone());
if opts.stress {
stress(2);
} else {
let max = uint::parse_bytes(args.get(1).as_bytes(), 10u).unwrap() as
int;
let num_trials = 10;
for n in range(1, max + 1) {
for _ in range(0u, num_trials) {
let start = time::precise_time_ns();
let fibn = fib(n);
let stop = time::precise_time_ns();
let elapsed = stop - start;
println!("{}\t{}\t{}", n, fibn, elapsed.to_string());
}
}
}
} | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT | random_line_split |
|
fussy.rs | extern crate fuss;
use self::fuss::Simplex;
use core::world::dungeon::builder::Buildable;
use core::world::dungeon::map;
use core::world::dungeon::map::Measurable;
///
/// Builder for generating noise maps
///
pub struct Fussy {
pub grid: map::Grid<u8>,
pub w: usize,
pub h: usize,
pub noise: Simplex,
pub threshold: f32
}
impl Fussy {
///
/// Add noise to the map based on sum_octaves
///
/// Will only "fill" a tile if the threshold is met; the threshold should be anywhere between [0, 2]
///
fn add_noise(&mut self) {
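// The summed octave noise comes back roughly in [-1, 1]; adding 1.0 shifts it into [0, 2] so it can be compared against the threshold.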
for x in 0..self.w {
for y in 0..self.h { | if self.noise.sum_octave_2d(16, x as f32, y as f32, 0.5, 0.007) + 1.0 > self.threshold {
self.grid[x][y] = 1;
}
}
}
}
///
/// Return a new `Fussy`
///
pub fn new(grid: map::Grid<u8>, threshold: f32) -> Self {
// Make a new dungeon with our fresh grid of size `w` by `h`
let fussy = Fussy {
grid: grid.clone(),
w: grid.width(),
h: grid.height(),
noise: Simplex::new(),
threshold
};
return fussy;
}
}
impl Buildable for Fussy {
type Output = u8;
fn build(&mut self) -> map::Grid<u8> {
self.add_noise();
return self.grid.clone();
}
} | random_line_split |
|
fussy.rs | extern crate fuss;
use self::fuss::Simplex;
use core::world::dungeon::builder::Buildable;
use core::world::dungeon::map;
use core::world::dungeon::map::Measurable;
///
/// Builder for generating noise maps
///
pub struct Fussy {
pub grid: map::Grid<u8>,
pub w: usize,
pub h: usize,
pub noise: Simplex,
pub threshold: f32
}
impl Fussy {
///
/// Add noise to the map based on sum_octaves
///
/// Will only "fill" a tile if the threshold is met; the threshold should be anywhere between [0, 2]
///
fn add_noise(&mut self) {
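// The summed octave noise comes back roughly in [-1, 1]; adding 1.0 shifts it into [0, 2] so it can be compared against the threshold.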
for x in 0..self.w {
for y in 0..self.h {
if self.noise.sum_octave_2d(16, x as f32, y as f32, 0.5, 0.007) + 1.0 > self.threshold {
self.grid[x][y] = 1;
}
}
}
}
///
/// Return a new `Fussy`
///
pub fn | (grid: map::Grid<u8>, threshold: f32) -> Self {
// Make a new dungeon with our fresh grid of size `w` by `h`
let fussy = Fussy {
grid: grid.clone(),
w: grid.width(),
h: grid.height(),
noise: Simplex::new(),
threshold
};
return fussy;
}
}
impl Buildable for Fussy {
type Output = u8;
fn build(&mut self) -> map::Grid<u8> {
self.add_noise();
return self.grid.clone();
}
} | new | identifier_name |
fussy.rs | extern crate fuss;
use self::fuss::Simplex;
use core::world::dungeon::builder::Buildable;
use core::world::dungeon::map;
use core::world::dungeon::map::Measurable;
///
/// Builder for generating noise maps
///
pub struct Fussy {
pub grid: map::Grid<u8>,
pub w: usize,
pub h: usize,
pub noise: Simplex,
pub threshold: f32
}
impl Fussy {
///
/// Add noise to the map based on sum_octaves
///
/// Will only "fill" a tile if the threshold is met; the threshold should be anywhere between [0, 2]
///
fn add_noise(&mut self) {
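// The summed octave noise comes back roughly in [-1, 1]; adding 1.0 shifts it into [0, 2] so it can be compared against the threshold.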
for x in 0..self.w {
for y in 0..self.h {
if self.noise.sum_octave_2d(16, x as f32, y as f32, 0.5, 0.007) + 1.0 > self.threshold {
self.grid[x][y] = 1;
}
}
}
}
///
/// Return a new `Fussy`
///
pub fn new(grid: map::Grid<u8>, threshold: f32) -> Self |
}
impl Buildable for Fussy {
type Output = u8;
fn build(&mut self) -> map::Grid<u8> {
self.add_noise();
return self.grid.clone();
}
} | {
// Make a new dungeon with our fresh grid of size `w` by `h`
let fussy = Fussy {
grid: grid.clone(),
w: grid.width(),
h: grid.height(),
noise: Simplex::new(),
threshold
};
return fussy;
} | identifier_body |
fussy.rs | extern crate fuss;
use self::fuss::Simplex;
use core::world::dungeon::builder::Buildable;
use core::world::dungeon::map;
use core::world::dungeon::map::Measurable;
///
/// Builder for generating noise maps
///
pub struct Fussy {
pub grid: map::Grid<u8>,
pub w: usize,
pub h: usize,
pub noise: Simplex,
pub threshold: f32
}
impl Fussy {
///
/// Add noise to the map based on sum_octaves
///
/// Will only "fill" a tile if the threshold is met; the threshold should be anywhere between [0, 2]
///
fn add_noise(&mut self) {
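// The summed octave noise comes back roughly in [-1, 1]; adding 1.0 shifts it into [0, 2] so it can be compared against the threshold.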
for x in 0..self.w {
for y in 0..self.h {
if self.noise.sum_octave_2d(16, x as f32, y as f32, 0.5, 0.007) + 1.0 > self.threshold |
}
}
}
///
/// Return a new `Fussy`
///
pub fn new(grid: map::Grid<u8>, threshold: f32) -> Self {
// Make a new dungeon with our fresh grid of size `w` by `h`
let fussy = Fussy {
grid: grid.clone(),
w: grid.width(),
h: grid.height(),
noise: Simplex::new(),
threshold
};
return fussy;
}
}
impl Buildable for Fussy {
type Output = u8;
fn build(&mut self) -> map::Grid<u8> {
self.add_noise();
return self.grid.clone();
}
} | {
self.grid[x][y] = 1;
} | conditional_block |
font.rs | First round out to pixel boundaries
// CG Origin is bottom left
let mut left = bounds.origin.x.floor() as i32;
let mut bottom = bounds.origin.y.floor() as i32;
let mut right = (bounds.origin.x + bounds.size.width + x_offset).ceil() as i32;
let mut top = (bounds.origin.y + bounds.size.height + y_offset).ceil() as i32;
// Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
// Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
// is not currently known, as CG dilates the outlines by some percentage.
// This is taken from Skia.
left -= 1;
bottom -= 1;
right += 1;
top += 1;
let width = right - left;
let height = top - bottom;
GlyphMetrics {
rasterized_left: left,
rasterized_width: width as u32,
rasterized_height: height as u32,
rasterized_ascent: top,
rasterized_descent: -bottom,
advance: advance.width as f32,
}
}
#[link(name = "ApplicationServices", kind = "framework")]
extern {
static kCTFontVariationAxisIdentifierKey: CFStringRef;
static kCTFontVariationAxisNameKey: CFStringRef;
static kCTFontVariationAxisMinimumValueKey: CFStringRef;
static kCTFontVariationAxisMaximumValueKey: CFStringRef;
static kCTFontVariationAxisDefaultValueKey: CFStringRef;
fn CTFontCopyVariationAxes(font: CTFontRef) -> CFArrayRef;
}
fn new_ct_font_with_variations(cg_font: &CGFont, size: f64, variations: &[FontVariation]) -> CTFont {
unsafe {
let ct_font = core_text::font::new_from_CGFont(cg_font, size);
if variations.is_empty() {
return ct_font;
}
let axes_ref = CTFontCopyVariationAxes(ct_font.as_concrete_TypeRef());
if axes_ref.is_null() {
return ct_font;
}
let axes: CFArray<CFDictionary> = TCFType::wrap_under_create_rule(axes_ref);
let mut vals: Vec<(CFString, CFNumber)> = Vec::with_capacity(variations.len() as usize);
for axis in axes.iter() {
if !axis.instance_of::<CFDictionary>() {
return ct_font;
}
let tag_val = match axis.find(kCTFontVariationAxisIdentifierKey as *const _) {
Some(tag_ptr) => {
let tag: CFNumber = TCFType::wrap_under_get_rule(tag_ptr as CFNumberRef);
if !tag.instance_of::<CFNumber>() {
return ct_font;
}
match tag.to_i64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let mut val = match variations.iter().find(|variation| (variation.tag as i64) == tag_val) {
Some(variation) => variation.value as f64,
None => continue,
};
let name: CFString = match axis.find(kCTFontVariationAxisNameKey as *const _) {
Some(name_ptr) => TCFType::wrap_under_get_rule(name_ptr as CFStringRef),
None => return ct_font,
};
if !name.instance_of::<CFString>() {
return ct_font;
}
let min_val = match axis.find(kCTFontVariationAxisMinimumValueKey as *const _) {
Some(min_ptr) => {
let min: CFNumber = TCFType::wrap_under_get_rule(min_ptr as CFNumberRef);
if !min.instance_of::<CFNumber>() {
return ct_font;
}
match min.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let max_val = match axis.find(kCTFontVariationAxisMaximumValueKey as *const _) {
Some(max_ptr) => {
let max: CFNumber = TCFType::wrap_under_get_rule(max_ptr as CFNumberRef);
if !max.instance_of::<CFNumber>() {
return ct_font;
}
match max.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let def_val = match axis.find(kCTFontVariationAxisDefaultValueKey as *const _) {
Some(def_ptr) => {
let def: CFNumber = TCFType::wrap_under_get_rule(def_ptr as CFNumberRef);
if !def.instance_of::<CFNumber>() {
return ct_font;
}
match def.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
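// Clamp the requested value to the axis range and only record it if it differs from the axis default.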
val = val.max(min_val).min(max_val);
if val != def_val {
vals.push((name, CFNumber::from(val)));
}
}
if vals.is_empty() {
return ct_font;
}
let vals_dict = CFDictionary::from_CFType_pairs(&vals);
let cg_var_font = cg_font.create_copy_from_variations(&vals_dict).unwrap();
core_text::font::new_from_CGFont_with_variations(&cg_var_font, size, &vals_dict)
}
}
fn is_bitmap_font(ct_font: &CTFont) -> bool {
let traits = ct_font.symbolic_traits();
(traits & kCTFontColorGlyphsTrait) != 0
}
// Skew factor matching Gecko/CG.
const OBLIQUE_SKEW_FACTOR: f32 = 0.25;
impl FontContext {
pub fn new() -> Result<FontContext, ResourceCacheError> {
debug!("Test for subpixel AA support: {}", supports_subpixel_aa());
// Force CG to use sRGB color space to gamma correct.
let contrast = 0.0;
let gamma = 0.0;
Ok(FontContext {
cg_fonts: FastHashMap::default(),
ct_fonts: FastHashMap::default(),
gamma_lut: GammaLut::new(contrast, gamma, gamma),
})
}
pub fn has_font(&self, font_key: &FontKey) -> bool {
self.cg_fonts.contains_key(font_key)
}
pub fn add_raw_font(&mut self, font_key: &FontKey, bytes: Arc<Vec<u8>>, index: u32) {
if self.cg_fonts.contains_key(font_key) {
return;
}
assert_eq!(index, 0);
let data_provider = CGDataProvider::from_buffer(bytes);
let cg_font = match CGFont::from_data_provider(data_provider) {
Err(_) => return,
Ok(cg_font) => cg_font,
};
self.cg_fonts.insert(*font_key, cg_font);
}
pub fn add_native_font(&mut self, font_key: &FontKey, native_font_handle: NativeFontHandle) {
if self.cg_fonts.contains_key(font_key) {
return;
}
self.cg_fonts
.insert(*font_key, native_font_handle.0);
}
pub fn delete_font(&mut self, font_key: &FontKey) {
if let Some(_) = self.cg_fonts.remove(font_key) {
self.ct_fonts.retain(|k, _| k.0 != *font_key);
}
}
fn get_ct_font(
&mut self,
font_key: FontKey,
size: Au,
variations: &[FontVariation],
) -> Option<CTFont> {
match self.ct_fonts.entry((font_key, size, variations.to_vec())) {
Entry::Occupied(entry) => Some((*entry.get()).clone()),
Entry::Vacant(entry) => {
let cg_font = match self.cg_fonts.get(&font_key) {
None => return None,
Some(cg_font) => cg_font,
};
let ct_font = new_ct_font_with_variations(cg_font, size.to_f64_px(), variations);
entry.insert(ct_font.clone());
Some(ct_font)
}
}
}
pub fn get_glyph_index(&mut self, font_key: FontKey, ch: char) -> Option<u32> {
let character = ch as u16;
let mut glyph = 0;
self.get_ct_font(font_key, Au::from_px(16), &[])
.and_then(|ref ct_font| {
let result = ct_font.get_glyphs_for_characters(&character, &mut glyph, 1);
if result {
Some(glyph as u32)
} else {
None
}
})
}
pub fn get_glyph_dimensions(
&mut self,
font: &FontInstance,
key: &GlyphKey,
) -> Option<GlyphDimensions> {
self.get_ct_font(font.font_key, font.size, &font.variations)
.and_then(|ref ct_font| {
let glyph = key.index as CGGlyph;
let bitmap = is_bitmap_font(ct_font);
let (x_offset, y_offset) = if bitmap { (0.0, 0.0) } else { font.get_subpx_offset(key) };
let transform = if font.flags.intersects(FontInstanceFlags::SYNTHETIC_ITALICS |
FontInstanceFlags::TRANSPOSE |
FontInstanceFlags::FLIP_X |
FontInstanceFlags::FLIP_Y) {
let mut shape = FontTransform::identity();
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let extra_strikes = font.get_extra_strikes(1.0);
let metrics = get_glyph_metrics(
ct_font,
transform.as_ref(),
glyph,
x_offset,
y_offset,
extra_strikes as f64,
);
if metrics.rasterized_width == 0 || metrics.rasterized_height == 0 {
None
} else {
Some(GlyphDimensions {
left: metrics.rasterized_left,
top: metrics.rasterized_ascent,
width: metrics.rasterized_width as u32,
height: metrics.rasterized_height as u32,
advance: metrics.advance,
})
}
})
}
// Assumes the pixels here are linear values from CG
#[cfg(not(feature = "pathfinder"))]
fn gamma_correct_pixels(
&self,
pixels: &mut Vec<u8>,
render_mode: FontRenderMode,
color: ColorU,
) {
// Then convert back to gamma corrected values.
match render_mode {
FontRenderMode::Alpha => {
self.gamma_lut.preblend_grayscale(pixels, color);
}
FontRenderMode::Subpixel => {
self.gamma_lut.preblend(pixels, color);
}
_ => {} // Again, give mono untouched since only the alpha matters.
}
}
#[allow(dead_code)]
fn print_glyph_data(&mut self, data: &[u8], width: usize, height: usize) {
// Rust doesn't have step_by support on stable :(
println!("Width is: {:?} height: {:?}", width, height);
for i in 0 .. height {
let current_height = i * width * 4;
for pixel in data[current_height .. current_height + (width * 4)].chunks(4) {
let b = pixel[0];
let g = pixel[1];
let r = pixel[2];
let a = pixel[3];
print!("({}, {}, {}, {}) ", r, g, b, a);
}
println!();
}
}
pub fn prepare_font(font: &mut FontInstance) {
match font.render_mode {
FontRenderMode::Mono => {
// In mono mode the color of the font is irrelevant.
font.color = ColorU::new(255, 255, 255, 255);
// Subpixel positioning is disabled in mono mode.
font.subpx_dir = SubpixelDirection::None;
}
FontRenderMode::Alpha => { | let rb = if should_use_white_on_black(font.color) { 255 } else { 0 };
ColorU::new(rb, g, rb, a)
} else {
ColorU::new(255, 255, 255, 255)
};
}
FontRenderMode::Subpixel => {
// Quantization may change the light/dark determination, so quantize in the
// direction necessary to respect the threshold.
font.color = if should_use_white_on_black(font.color) {
font.color.quantized_ceil()
} else {
font.color.quantized_floor()
};
}
}
}
#[cfg(not(feature = "pathfinder"))]
pub fn rasterize_glyph(&mut self, font: &FontInstance, key: &GlyphKey) -> GlyphRasterResult {
let (x_scale, y_scale) = font.transform.compute_scale().unwrap_or((1.0, 1.0));
let size = font.size.scale_by(y_scale as f32);
let ct_font = match self.get_ct_font(font.font_key, size, &font.variations) {
Some(font) => font,
None => return GlyphRasterResult::LoadFailed,
};
let bitmap = is_bitmap_font(&ct_font);
let (mut shape, (x_offset, y_offset)) = if bitmap {
(FontTransform::identity(), (0.0, 0.0))
} else {
(font.transform.invert_scale(y_scale, y_scale), font.get_subpx_offset(key))
};
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
let transform = if !shape.is_identity() {
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let glyph = key.index as CGGlyph;
let (strike_scale, pixel_step) = if bitmap { (y_scale, 1.0) } else { (x_scale, y_scale / x_scale) };
let extra_strikes = font.get_extra_strikes(strike_scale | font.color = if font.flags.contains(FontInstanceFlags::FONT_SMOOTHING) {
// Only the G channel is used to index grayscale tables,
// so use R and B to preserve light/dark determination.
let ColorU { g, a, .. } = font.color.luminance_color().quantized_ceil(); | random_line_split |
font.rs | () -> bool {
let mut cg_context = CGContext::create_bitmap_context(
None,
1,
1,
8,
4,
&CGColorSpace::create_device_rgb(),
kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little,
);
let ct_font = core_text::font::new_from_name("Helvetica", 16.).unwrap();
cg_context.set_should_smooth_fonts(true);
cg_context.set_should_antialias(true);
cg_context.set_rgb_fill_color(1.0, 1.0, 1.0, 1.0);
let point = CGPoint { x: -1., y: 0. };
let glyph = '|' as CGGlyph;
ct_font.draw_glyphs(&[glyph], &[point], cg_context.clone());
let data = cg_context.data();
data[0] != data[1] || data[1] != data[2]
}
fn should_use_white_on_black(color: ColorU) -> bool {
let (r, g, b) = (color.r as u32, color.g as u32, color.b as u32);
// These thresholds were determined on 10.12 by observing what CG does.
r >= 85 && g >= 85 && b >= 85 && r + g + b >= 2 * 255
}
fn get_glyph_metrics(
ct_font: &CTFont,
transform: Option<&CGAffineTransform>,
glyph: CGGlyph,
x_offset: f64,
y_offset: f64,
extra_width: f64,
) -> GlyphMetrics {
let mut bounds = ct_font.get_bounding_rects_for_glyphs(kCTFontDefaultOrientation, &[glyph]);
if bounds.origin.x.is_nan() || bounds.origin.y.is_nan() || bounds.size.width.is_nan() ||
bounds.size.height.is_nan()
{
// If an unexpected glyph index is requested, core text will return NaN values
// which cause us to do bad things, as the value is cast into an integer and
// overflows when expanding the bounds a few lines below.
// Instead we are better off returning zero-sized metrics because this special
// case is handled by the callers of this method.
return GlyphMetrics {
rasterized_left: 0,
rasterized_width: 0,
rasterized_height: 0,
rasterized_ascent: 0,
rasterized_descent: 0,
advance: 0.0,
};
}
let mut advance = CGSize { width: 0.0, height: 0.0 };
ct_font.get_advances_for_glyphs(kCTFontDefaultOrientation, &glyph, &mut advance, 1);
if bounds.size.width > 0.0 {
bounds.size.width += extra_width;
}
if advance.width > 0.0 {
advance.width += extra_width;
}
if let Some(transform) = transform {
bounds = bounds.apply_transform(transform);
}
// First round out to pixel boundaries
// CG Origin is bottom left
let mut left = bounds.origin.x.floor() as i32;
let mut bottom = bounds.origin.y.floor() as i32;
let mut right = (bounds.origin.x + bounds.size.width + x_offset).ceil() as i32;
let mut top = (bounds.origin.y + bounds.size.height + y_offset).ceil() as i32;
// Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
// Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
// is not currently known, as CG dilates the outlines by some percentage.
// This is taken from Skia.
left -= 1;
bottom -= 1;
right += 1;
top += 1;
let width = right - left;
let height = top - bottom;
GlyphMetrics {
rasterized_left: left,
rasterized_width: width as u32,
rasterized_height: height as u32,
rasterized_ascent: top,
rasterized_descent: -bottom,
advance: advance.width as f32,
}
}
#[link(name = "ApplicationServices", kind = "framework")]
extern {
static kCTFontVariationAxisIdentifierKey: CFStringRef;
static kCTFontVariationAxisNameKey: CFStringRef;
static kCTFontVariationAxisMinimumValueKey: CFStringRef;
static kCTFontVariationAxisMaximumValueKey: CFStringRef;
static kCTFontVariationAxisDefaultValueKey: CFStringRef;
fn CTFontCopyVariationAxes(font: CTFontRef) -> CFArrayRef;
}
fn new_ct_font_with_variations(cg_font: &CGFont, size: f64, variations: &[FontVariation]) -> CTFont {
unsafe {
let ct_font = core_text::font::new_from_CGFont(cg_font, size);
if variations.is_empty() {
return ct_font;
}
let axes_ref = CTFontCopyVariationAxes(ct_font.as_concrete_TypeRef());
if axes_ref.is_null() {
return ct_font;
}
let axes: CFArray<CFDictionary> = TCFType::wrap_under_create_rule(axes_ref);
let mut vals: Vec<(CFString, CFNumber)> = Vec::with_capacity(variations.len() as usize);
for axis in axes.iter() {
if !axis.instance_of::<CFDictionary>() {
return ct_font;
}
let tag_val = match axis.find(kCTFontVariationAxisIdentifierKey as *const _) {
Some(tag_ptr) => {
let tag: CFNumber = TCFType::wrap_under_get_rule(tag_ptr as CFNumberRef);
if !tag.instance_of::<CFNumber>() {
return ct_font;
}
match tag.to_i64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let mut val = match variations.iter().find(|variation| (variation.tag as i64) == tag_val) {
Some(variation) => variation.value as f64,
None => continue,
};
let name: CFString = match axis.find(kCTFontVariationAxisNameKey as *const _) {
Some(name_ptr) => TCFType::wrap_under_get_rule(name_ptr as CFStringRef),
None => return ct_font,
};
if !name.instance_of::<CFString>() {
return ct_font;
}
let min_val = match axis.find(kCTFontVariationAxisMinimumValueKey as *const _) {
Some(min_ptr) => {
let min: CFNumber = TCFType::wrap_under_get_rule(min_ptr as CFNumberRef);
if !min.instance_of::<CFNumber>() {
return ct_font;
}
match min.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let max_val = match axis.find(kCTFontVariationAxisMaximumValueKey as *const _) {
Some(max_ptr) => {
let max: CFNumber = TCFType::wrap_under_get_rule(max_ptr as CFNumberRef);
if !max.instance_of::<CFNumber>() {
return ct_font;
}
match max.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let def_val = match axis.find(kCTFontVariationAxisDefaultValueKey as *const _) {
Some(def_ptr) => {
let def: CFNumber = TCFType::wrap_under_get_rule(def_ptr as CFNumberRef);
if !def.instance_of::<CFNumber>() {
return ct_font;
}
match def.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
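// Clamp the requested value to the axis range and only record it if it differs from the axis default.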
val = val.max(min_val).min(max_val);
if val != def_val {
vals.push((name, CFNumber::from(val)));
}
}
if vals.is_empty() {
return ct_font;
}
let vals_dict = CFDictionary::from_CFType_pairs(&vals);
let cg_var_font = cg_font.create_copy_from_variations(&vals_dict).unwrap();
core_text::font::new_from_CGFont_with_variations(&cg_var_font, size, &vals_dict)
}
}
fn is_bitmap_font(ct_font: &CTFont) -> bool {
let traits = ct_font.symbolic_traits();
(traits & kCTFontColorGlyphsTrait) != 0
}
// Skew factor matching Gecko/CG.
const OBLIQUE_SKEW_FACTOR: f32 = 0.25;
impl FontContext {
pub fn new() -> Result<FontContext, ResourceCacheError> {
debug!("Test for subpixel AA support: {}", supports_subpixel_aa());
// Force CG to use sRGB color space to gamma correct.
let contrast = 0.0;
let gamma = 0.0;
Ok(FontContext {
cg_fonts: FastHashMap::default(),
ct_fonts: FastHashMap::default(),
gamma_lut: GammaLut::new(contrast, gamma, gamma),
})
}
pub fn has_font(&self, font_key: &FontKey) -> bool {
self.cg_fonts.contains_key(font_key)
}
pub fn add_raw_font(&mut self, font_key: &FontKey, bytes: Arc<Vec<u8>>, index: u32) {
if self.cg_fonts.contains_key(font_key) {
return;
}
assert_eq!(index, 0);
let data_provider = CGDataProvider::from_buffer(bytes);
let cg_font = match CGFont::from_data_provider(data_provider) {
Err(_) => return,
Ok(cg_font) => cg_font,
};
self.cg_fonts.insert(*font_key, cg_font);
}
pub fn add_native_font(&mut self, font_key: &FontKey, native_font_handle: NativeFontHandle) {
if self.cg_fonts.contains_key(font_key) {
return;
}
self.cg_fonts
.insert(*font_key, native_font_handle.0);
}
pub fn delete_font(&mut self, font_key: &FontKey) {
if let Some(_) = self.cg_fonts.remove(font_key) {
self.ct_fonts.retain(|k, _| k.0 != *font_key);
}
}
fn get_ct_font(
&mut self,
font_key: FontKey,
size: Au,
variations: &[FontVariation],
) -> Option<CTFont> {
match self.ct_fonts.entry((font_key, size, variations.to_vec())) {
Entry::Occupied(entry) => Some((*entry.get()).clone()),
Entry::Vacant(entry) => {
let cg_font = match self.cg_fonts.get(&font_key) {
None => return None,
Some(cg_font) => cg_font,
};
let ct_font = new_ct_font_with_variations(cg_font, size.to_f64_px(), variations);
entry.insert(ct_font.clone());
Some(ct_font)
}
}
}
pub fn get_glyph_index(&mut self, font_key: FontKey, ch: char) -> Option<u32> {
let character = ch as u16;
let mut glyph = 0;
self.get_ct_font(font_key, Au::from_px(16), &[])
.and_then(|ref ct_font| {
let result = ct_font.get_glyphs_for_characters(&character, &mut glyph, 1);
if result {
Some(glyph as u32)
} else {
None
}
})
}
pub fn get_glyph_dimensions(
&mut self,
font: &FontInstance,
key: &GlyphKey,
) -> Option<GlyphDimensions> {
self.get_ct_font(font.font_key, font.size, &font.variations)
.and_then(|ref ct_font| {
let glyph = key.index as CGGlyph;
let bitmap = is_bitmap_font(ct_font);
let (x_offset, y_offset) = if bitmap { (0.0, 0.0) } else { font.get_subpx_offset(key) };
let transform = if font.flags.intersects(FontInstanceFlags::SYNTHETIC_ITALICS |
FontInstanceFlags::TRANSPOSE |
FontInstanceFlags::FLIP_X |
FontInstanceFlags::FLIP_Y) {
let mut shape = FontTransform::identity();
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let extra_strikes = font.get_extra_strikes(1.0);
let metrics = get_glyph_metrics(
ct_font,
transform.as_ref(),
glyph,
x_offset,
y_offset,
extra_strikes as f64,
);
if metrics.rasterized_width == 0 || metrics.rasterized_height == 0 {
None
} else {
Some(GlyphDimensions {
left: metrics.rasterized_left,
top: metrics.rasterized_ascent,
width: metrics.rasterized_width as u32,
height: metrics.rasterized_height as u32,
advance: metrics.advance,
})
}
})
}
// Assumes the pixels here are linear values from CG
#[cfg(not(feature = "pathfinder"))]
fn gamma_correct_pixels(
&self,
pixels: &mut Vec<u8>,
render_mode: FontRenderMode,
color: ColorU,
) {
// Then convert back to gamma corrected values.
match render_mode {
FontRenderMode::Alpha => {
self.gamma_lut.preblend_grayscale(pixels, color);
}
FontRenderMode::Subpixel => {
self.gamma_lut.preblend(pixels, color);
}
_ => {} // Again, give mono untouched since only the alpha matters.
}
}
#[allow(dead_code)]
fn print_glyph_data(&mut self, data: &[u8], width: usize, height: usize) {
// Rust doesn't have step_by support on stable :(
println!("Width is: {:?} height: {:?}", width, height);
for i in 0 .. height {
let current_height = i * width * 4;
for pixel in data[current_height .. current_height + (width * 4)].chunks(4) {
let b = pixel[0];
let g = pixel[1];
let r = pixel[2];
let a = pixel[3];
print!("({}, {}, {}, {}) ", r, g, b, a);
}
println!();
}
}
pub fn prepare_font(font: &mut FontInstance) {
match font.render_mode {
FontRenderMode::Mono => {
// In mono mode the color of the font is irrelevant.
font.color = ColorU::new(255, 255, 255, 255);
// Subpixel positioning is disabled in mono mode.
font.subpx_dir = SubpixelDirection::None;
}
FontRenderMode::Alpha => {
font.color = if font.flags.contains(FontInstanceFlags::FONT_SMOOTHING) {
// Only the G channel is used to index grayscale tables,
// so use R and B to preserve light/dark determination.
let ColorU { g, a, .. } = font.color.luminance_color().quantized_ceil();
let rb = if should_use_white_on_black(font.color) { 255 } else { 0 };
ColorU::new(rb, g, rb, a)
} else {
ColorU::new(255, 255, 255, 255)
};
}
FontRenderMode::Subpixel => {
// Quantization may change the light/dark determination, so quantize in the
// direction necessary to respect the threshold.
font.color = if should_use_white_on_black(font | supports_subpixel_aa | identifier_name |
|
font.rs | round out to pixel boundaries
// CG Origin is bottom left
let mut left = bounds.origin.x.floor() as i32;
let mut bottom = bounds.origin.y.floor() as i32;
let mut right = (bounds.origin.x + bounds.size.width + x_offset).ceil() as i32;
let mut top = (bounds.origin.y + bounds.size.height + y_offset).ceil() as i32;
// Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
// Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
// is not currently known, as CG dilates the outlines by some percentage.
// This is taken from Skia.
left -= 1;
bottom -= 1;
right += 1;
top += 1;
let width = right - left;
let height = top - bottom;
GlyphMetrics {
rasterized_left: left,
rasterized_width: width as u32,
rasterized_height: height as u32,
rasterized_ascent: top,
rasterized_descent: -bottom,
advance: advance.width as f32,
}
}
#[link(name = "ApplicationServices", kind = "framework")]
extern {
static kCTFontVariationAxisIdentifierKey: CFStringRef;
static kCTFontVariationAxisNameKey: CFStringRef;
static kCTFontVariationAxisMinimumValueKey: CFStringRef;
static kCTFontVariationAxisMaximumValueKey: CFStringRef;
static kCTFontVariationAxisDefaultValueKey: CFStringRef;
fn CTFontCopyVariationAxes(font: CTFontRef) -> CFArrayRef;
}
fn new_ct_font_with_variations(cg_font: &CGFont, size: f64, variations: &[FontVariation]) -> CTFont {
unsafe {
let ct_font = core_text::font::new_from_CGFont(cg_font, size);
if variations.is_empty() {
return ct_font;
}
let axes_ref = CTFontCopyVariationAxes(ct_font.as_concrete_TypeRef());
if axes_ref.is_null() {
return ct_font;
}
let axes: CFArray<CFDictionary> = TCFType::wrap_under_create_rule(axes_ref);
let mut vals: Vec<(CFString, CFNumber)> = Vec::with_capacity(variations.len() as usize);
for axis in axes.iter() {
if !axis.instance_of::<CFDictionary>() {
return ct_font;
}
let tag_val = match axis.find(kCTFontVariationAxisIdentifierKey as *const _) {
Some(tag_ptr) => {
let tag: CFNumber = TCFType::wrap_under_get_rule(tag_ptr as CFNumberRef);
if !tag.instance_of::<CFNumber>() {
return ct_font;
}
match tag.to_i64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let mut val = match variations.iter().find(|variation| (variation.tag as i64) == tag_val) {
Some(variation) => variation.value as f64,
None => continue,
};
let name: CFString = match axis.find(kCTFontVariationAxisNameKey as *const _) {
Some(name_ptr) => TCFType::wrap_under_get_rule(name_ptr as CFStringRef),
None => return ct_font,
};
if !name.instance_of::<CFString>() {
return ct_font;
}
let min_val = match axis.find(kCTFontVariationAxisMinimumValueKey as *const _) {
Some(min_ptr) => {
let min: CFNumber = TCFType::wrap_under_get_rule(min_ptr as CFNumberRef);
if !min.instance_of::<CFNumber>() {
return ct_font;
}
match min.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let max_val = match axis.find(kCTFontVariationAxisMaximumValueKey as *const _) {
Some(max_ptr) => {
let max: CFNumber = TCFType::wrap_under_get_rule(max_ptr as CFNumberRef);
if !max.instance_of::<CFNumber>() {
return ct_font;
}
match max.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let def_val = match axis.find(kCTFontVariationAxisDefaultValueKey as *const _) {
Some(def_ptr) => {
let def: CFNumber = TCFType::wrap_under_get_rule(def_ptr as CFNumberRef);
if !def.instance_of::<CFNumber>() {
return ct_font;
}
match def.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
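// Clamp the requested value to the axis range and only record it if it differs from the axis default.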
val = val.max(min_val).min(max_val);
if val != def_val {
vals.push((name, CFNumber::from(val)));
}
}
if vals.is_empty() {
return ct_font;
}
let vals_dict = CFDictionary::from_CFType_pairs(&vals);
let cg_var_font = cg_font.create_copy_from_variations(&vals_dict).unwrap();
core_text::font::new_from_CGFont_with_variations(&cg_var_font, size, &vals_dict)
}
}
fn is_bitmap_font(ct_font: &CTFont) -> bool {
let traits = ct_font.symbolic_traits();
(traits & kCTFontColorGlyphsTrait) != 0
}
// Skew factor matching Gecko/CG.
const OBLIQUE_SKEW_FACTOR: f32 = 0.25;
impl FontContext {
pub fn new() -> Result<FontContext, ResourceCacheError> {
debug!("Test for subpixel AA support: {}", supports_subpixel_aa());
// Force CG to use sRGB color space to gamma correct.
let contrast = 0.0;
let gamma = 0.0;
Ok(FontContext {
cg_fonts: FastHashMap::default(),
ct_fonts: FastHashMap::default(),
gamma_lut: GammaLut::new(contrast, gamma, gamma),
})
}
pub fn has_font(&self, font_key: &FontKey) -> bool {
self.cg_fonts.contains_key(font_key)
}
pub fn add_raw_font(&mut self, font_key: &FontKey, bytes: Arc<Vec<u8>>, index: u32) {
if self.cg_fonts.contains_key(font_key) {
return;
}
assert_eq!(index, 0);
let data_provider = CGDataProvider::from_buffer(bytes);
let cg_font = match CGFont::from_data_provider(data_provider) {
Err(_) => return,
Ok(cg_font) => cg_font,
};
self.cg_fonts.insert(*font_key, cg_font);
}
pub fn add_native_font(&mut self, font_key: &FontKey, native_font_handle: NativeFontHandle) {
if self.cg_fonts.contains_key(font_key) {
return;
}
self.cg_fonts
.insert(*font_key, native_font_handle.0);
}
pub fn delete_font(&mut self, font_key: &FontKey) |
fn get_ct_font(
&mut self,
font_key: FontKey,
size: Au,
variations: &[FontVariation],
) -> Option<CTFont> {
match self.ct_fonts.entry((font_key, size, variations.to_vec())) {
Entry::Occupied(entry) => Some((*entry.get()).clone()),
Entry::Vacant(entry) => {
let cg_font = match self.cg_fonts.get(&font_key) {
None => return None,
Some(cg_font) => cg_font,
};
let ct_font = new_ct_font_with_variations(cg_font, size.to_f64_px(), variations);
entry.insert(ct_font.clone());
Some(ct_font)
}
}
}
pub fn get_glyph_index(&mut self, font_key: FontKey, ch: char) -> Option<u32> {
let character = ch as u16;
let mut glyph = 0;
self.get_ct_font(font_key, Au::from_px(16), &[])
.and_then(|ref ct_font| {
let result = ct_font.get_glyphs_for_characters(&character, &mut glyph, 1);
if result {
Some(glyph as u32)
} else {
None
}
})
}
pub fn get_glyph_dimensions(
&mut self,
font: &FontInstance,
key: &GlyphKey,
) -> Option<GlyphDimensions> {
self.get_ct_font(font.font_key, font.size, &font.variations)
.and_then(|ref ct_font| {
let glyph = key.index as CGGlyph;
let bitmap = is_bitmap_font(ct_font);
let (x_offset, y_offset) = if bitmap { (0.0, 0.0) } else { font.get_subpx_offset(key) };
let transform = if font.flags.intersects(FontInstanceFlags::SYNTHETIC_ITALICS |
FontInstanceFlags::TRANSPOSE |
FontInstanceFlags::FLIP_X |
FontInstanceFlags::FLIP_Y) {
let mut shape = FontTransform::identity();
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let extra_strikes = font.get_extra_strikes(1.0);
let metrics = get_glyph_metrics(
ct_font,
transform.as_ref(),
glyph,
x_offset,
y_offset,
extra_strikes as f64,
);
if metrics.rasterized_width == 0 || metrics.rasterized_height == 0 {
None
} else {
Some(GlyphDimensions {
left: metrics.rasterized_left,
top: metrics.rasterized_ascent,
width: metrics.rasterized_width as u32,
height: metrics.rasterized_height as u32,
advance: metrics.advance,
})
}
})
}
// Assumes the pixels here are linear values from CG
#[cfg(not(feature = "pathfinder"))]
fn gamma_correct_pixels(
&self,
pixels: &mut Vec<u8>,
render_mode: FontRenderMode,
color: ColorU,
) {
// Then convert back to gamma corrected values.
match render_mode {
FontRenderMode::Alpha => {
self.gamma_lut.preblend_grayscale(pixels, color);
}
FontRenderMode::Subpixel => {
self.gamma_lut.preblend(pixels, color);
}
_ => {} // Again, give mono untouched since only the alpha matters.
}
}
#[allow(dead_code)]
fn print_glyph_data(&mut self, data: &[u8], width: usize, height: usize) {
// Rust doesn't have step_by support on stable :(
println!("Width is: {:?} height: {:?}", width, height);
for i in 0 .. height {
let current_height = i * width * 4;
for pixel in data[current_height .. current_height + (width * 4)].chunks(4) {
let b = pixel[0];
let g = pixel[1];
let r = pixel[2];
let a = pixel[3];
print!("({}, {}, {}, {}) ", r, g, b, a);
}
println!();
}
}
pub fn prepare_font(font: &mut FontInstance) {
match font.render_mode {
FontRenderMode::Mono => {
// In mono mode the color of the font is irrelevant.
font.color = ColorU::new(255, 255, 255, 255);
// Subpixel positioning is disabled in mono mode.
font.subpx_dir = SubpixelDirection::None;
}
FontRenderMode::Alpha => {
font.color = if font.flags.contains(FontInstanceFlags::FONT_SMOOTHING) {
// Only the G channel is used to index grayscale tables,
// so use R and B to preserve light/dark determination.
let ColorU { g, a, .. } = font.color.luminance_color().quantized_ceil();
let rb = if should_use_white_on_black(font.color) { 255 } else { 0 };
ColorU::new(rb, g, rb, a)
} else {
ColorU::new(255, 255, 255, 255)
};
}
FontRenderMode::Subpixel => {
// Quantization may change the light/dark determination, so quantize in the
// direction necessary to respect the threshold.
font.color = if should_use_white_on_black(font.color) {
font.color.quantized_ceil()
} else {
font.color.quantized_floor()
};
}
}
}
#[cfg(not(feature = "pathfinder"))]
pub fn rasterize_glyph(&mut self, font: &FontInstance, key: &GlyphKey) -> GlyphRasterResult {
let (x_scale, y_scale) = font.transform.compute_scale().unwrap_or((1.0, 1.0));
let size = font.size.scale_by(y_scale as f32);
let ct_font = match self.get_ct_font(font.font_key, size, &font.variations) {
Some(font) => font,
None => return GlyphRasterResult::LoadFailed,
};
let bitmap = is_bitmap_font(&ct_font);
let (mut shape, (x_offset, y_offset)) = if bitmap {
(FontTransform::identity(), (0.0, 0.0))
} else {
(font.transform.invert_scale(y_scale, y_scale), font.get_subpx_offset(key))
};
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
let transform = if !shape.is_identity() {
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let glyph = key.index as CGGlyph;
let (strike_scale, pixel_step) = if bitmap { (y_scale, 1.0) } else { (x_scale, y_scale / x_scale) };
let extra_strikes = font.get_extra_strikes(strike | {
if let Some(_) = self.cg_fonts.remove(font_key) {
self.ct_fonts.retain(|k, _| k.0 != *font_key);
}
} | identifier_body |
font.rs | round out to pixel boundaries
// CG Origin is bottom left
let mut left = bounds.origin.x.floor() as i32;
let mut bottom = bounds.origin.y.floor() as i32;
let mut right = (bounds.origin.x + bounds.size.width + x_offset).ceil() as i32;
let mut top = (bounds.origin.y + bounds.size.height + y_offset).ceil() as i32;
// Expand the bounds by 1 pixel, to give CG room for anti-aliasing.
// Note that this outset is to allow room for LCD smoothed glyphs. However, the correct outset
// is not currently known, as CG dilates the outlines by some percentage.
// This is taken from Skia.
left -= 1;
bottom -= 1;
right += 1;
top += 1;
let width = right - left;
let height = top - bottom;
GlyphMetrics {
rasterized_left: left,
rasterized_width: width as u32,
rasterized_height: height as u32,
rasterized_ascent: top,
rasterized_descent: -bottom,
advance: advance.width as f32,
}
}
#[link(name = "ApplicationServices", kind = "framework")]
extern {
static kCTFontVariationAxisIdentifierKey: CFStringRef;
static kCTFontVariationAxisNameKey: CFStringRef;
static kCTFontVariationAxisMinimumValueKey: CFStringRef;
static kCTFontVariationAxisMaximumValueKey: CFStringRef;
static kCTFontVariationAxisDefaultValueKey: CFStringRef;
fn CTFontCopyVariationAxes(font: CTFontRef) -> CFArrayRef;
}
fn new_ct_font_with_variations(cg_font: &CGFont, size: f64, variations: &[FontVariation]) -> CTFont {
unsafe {
let ct_font = core_text::font::new_from_CGFont(cg_font, size);
if variations.is_empty() {
return ct_font;
}
let axes_ref = CTFontCopyVariationAxes(ct_font.as_concrete_TypeRef());
if axes_ref.is_null() {
return ct_font;
}
let axes: CFArray<CFDictionary> = TCFType::wrap_under_create_rule(axes_ref);
let mut vals: Vec<(CFString, CFNumber)> = Vec::with_capacity(variations.len() as usize);
for axis in axes.iter() {
if !axis.instance_of::<CFDictionary>() {
return ct_font;
}
let tag_val = match axis.find(kCTFontVariationAxisIdentifierKey as *const _) {
Some(tag_ptr) => |
None => return ct_font,
};
let mut val = match variations.iter().find(|variation| (variation.tag as i64) == tag_val) {
Some(variation) => variation.value as f64,
None => continue,
};
let name: CFString = match axis.find(kCTFontVariationAxisNameKey as *const _) {
Some(name_ptr) => TCFType::wrap_under_get_rule(name_ptr as CFStringRef),
None => return ct_font,
};
if !name.instance_of::<CFString>() {
return ct_font;
}
let min_val = match axis.find(kCTFontVariationAxisMinimumValueKey as *const _) {
Some(min_ptr) => {
let min: CFNumber = TCFType::wrap_under_get_rule(min_ptr as CFNumberRef);
if !min.instance_of::<CFNumber>() {
return ct_font;
}
match min.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let max_val = match axis.find(kCTFontVariationAxisMaximumValueKey as *const _) {
Some(max_ptr) => {
let max: CFNumber = TCFType::wrap_under_get_rule(max_ptr as CFNumberRef);
if !max.instance_of::<CFNumber>() {
return ct_font;
}
match max.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
let def_val = match axis.find(kCTFontVariationAxisDefaultValueKey as *const _) {
Some(def_ptr) => {
let def: CFNumber = TCFType::wrap_under_get_rule(def_ptr as CFNumberRef);
if !def.instance_of::<CFNumber>() {
return ct_font;
}
match def.to_f64() {
Some(val) => val,
None => return ct_font,
}
}
None => return ct_font,
};
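// Clamp the requested value to the axis range and only record it if it differs from the axis default.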
val = val.max(min_val).min(max_val);
if val != def_val {
vals.push((name, CFNumber::from(val)));
}
}
if vals.is_empty() {
return ct_font;
}
let vals_dict = CFDictionary::from_CFType_pairs(&vals);
let cg_var_font = cg_font.create_copy_from_variations(&vals_dict).unwrap();
core_text::font::new_from_CGFont_with_variations(&cg_var_font, size, &vals_dict)
}
}
fn is_bitmap_font(ct_font: &CTFont) -> bool {
let traits = ct_font.symbolic_traits();
(traits & kCTFontColorGlyphsTrait) != 0
}
// Skew factor matching Gecko/CG.
const OBLIQUE_SKEW_FACTOR: f32 = 0.25;
impl FontContext {
pub fn new() -> Result<FontContext, ResourceCacheError> {
debug!("Test for subpixel AA support: {}", supports_subpixel_aa());
// Force CG to use sRGB color space to gamma correct.
let contrast = 0.0;
let gamma = 0.0;
Ok(FontContext {
cg_fonts: FastHashMap::default(),
ct_fonts: FastHashMap::default(),
gamma_lut: GammaLut::new(contrast, gamma, gamma),
})
}
pub fn has_font(&self, font_key: &FontKey) -> bool {
self.cg_fonts.contains_key(font_key)
}
pub fn add_raw_font(&mut self, font_key: &FontKey, bytes: Arc<Vec<u8>>, index: u32) {
if self.cg_fonts.contains_key(font_key) {
return;
}
assert_eq!(index, 0);
let data_provider = CGDataProvider::from_buffer(bytes);
let cg_font = match CGFont::from_data_provider(data_provider) {
Err(_) => return,
Ok(cg_font) => cg_font,
};
self.cg_fonts.insert(*font_key, cg_font);
}
pub fn add_native_font(&mut self, font_key: &FontKey, native_font_handle: NativeFontHandle) {
if self.cg_fonts.contains_key(font_key) {
return;
}
self.cg_fonts
.insert(*font_key, native_font_handle.0);
}
pub fn delete_font(&mut self, font_key: &FontKey) {
if let Some(_) = self.cg_fonts.remove(font_key) {
self.ct_fonts.retain(|k, _| k.0 != *font_key);
}
}
fn get_ct_font(
&mut self,
font_key: FontKey,
size: Au,
variations: &[FontVariation],
) -> Option<CTFont> {
match self.ct_fonts.entry((font_key, size, variations.to_vec())) {
Entry::Occupied(entry) => Some((*entry.get()).clone()),
Entry::Vacant(entry) => {
let cg_font = match self.cg_fonts.get(&font_key) {
None => return None,
Some(cg_font) => cg_font,
};
let ct_font = new_ct_font_with_variations(cg_font, size.to_f64_px(), variations);
entry.insert(ct_font.clone());
Some(ct_font)
}
}
}
pub fn get_glyph_index(&mut self, font_key: FontKey, ch: char) -> Option<u32> {
let character = ch as u16;
let mut glyph = 0;
self.get_ct_font(font_key, Au::from_px(16), &[])
.and_then(|ref ct_font| {
let result = ct_font.get_glyphs_for_characters(&character, &mut glyph, 1);
if result {
Some(glyph as u32)
} else {
None
}
})
}
pub fn get_glyph_dimensions(
&mut self,
font: &FontInstance,
key: &GlyphKey,
) -> Option<GlyphDimensions> {
self.get_ct_font(font.font_key, font.size, &font.variations)
.and_then(|ref ct_font| {
let glyph = key.index as CGGlyph;
let bitmap = is_bitmap_font(ct_font);
let (x_offset, y_offset) = if bitmap { (0.0, 0.0) } else { font.get_subpx_offset(key) };
let transform = if font.flags.intersects(FontInstanceFlags::SYNTHETIC_ITALICS |
FontInstanceFlags::TRANSPOSE |
FontInstanceFlags::FLIP_X |
FontInstanceFlags::FLIP_Y) {
let mut shape = FontTransform::identity();
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let extra_strikes = font.get_extra_strikes(1.0);
let metrics = get_glyph_metrics(
ct_font,
transform.as_ref(),
glyph,
x_offset,
y_offset,
extra_strikes as f64,
);
if metrics.rasterized_width == 0 || metrics.rasterized_height == 0 {
None
} else {
Some(GlyphDimensions {
left: metrics.rasterized_left,
top: metrics.rasterized_ascent,
width: metrics.rasterized_width as u32,
height: metrics.rasterized_height as u32,
advance: metrics.advance,
})
}
})
}
// Assumes the pixels here are linear values from CG
#[cfg(not(feature = "pathfinder"))]
fn gamma_correct_pixels(
&self,
pixels: &mut Vec<u8>,
render_mode: FontRenderMode,
color: ColorU,
) {
// Then convert back to gamma corrected values.
match render_mode {
FontRenderMode::Alpha => {
self.gamma_lut.preblend_grayscale(pixels, color);
}
FontRenderMode::Subpixel => {
self.gamma_lut.preblend(pixels, color);
}
_ => {} // Again, give mono untouched since only the alpha matters.
}
}
#[allow(dead_code)]
fn print_glyph_data(&mut self, data: &[u8], width: usize, height: usize) {
// Rust doesn't have step_by support on stable :(
println!("Width is: {:?} height: {:?}", width, height);
for i in 0 .. height {
let current_height = i * width * 4;
for pixel in data[current_height .. current_height + (width * 4)].chunks(4) {
let b = pixel[0];
let g = pixel[1];
let r = pixel[2];
let a = pixel[3];
print!("({}, {}, {}, {}) ", r, g, b, a);
}
println!();
}
}
pub fn prepare_font(font: &mut FontInstance) {
match font.render_mode {
FontRenderMode::Mono => {
// In mono mode the color of the font is irrelevant.
font.color = ColorU::new(255, 255, 255, 255);
// Subpixel positioning is disabled in mono mode.
font.subpx_dir = SubpixelDirection::None;
}
FontRenderMode::Alpha => {
font.color = if font.flags.contains(FontInstanceFlags::FONT_SMOOTHING) {
// Only the G channel is used to index grayscale tables,
// so use R and B to preserve light/dark determination.
let ColorU { g, a,.. } = font.color.luminance_color().quantized_ceil();
let rb = if should_use_white_on_black(font.color) { 255 } else { 0 };
ColorU::new(rb, g, rb, a)
} else {
ColorU::new(255, 255, 255, 255)
};
}
FontRenderMode::Subpixel => {
// Quantization may change the light/dark determination, so quantize in the
// direction necessary to respect the threshold.
font.color = if should_use_white_on_black(font.color) {
font.color.quantized_ceil()
} else {
font.color.quantized_floor()
};
}
}
}
#[cfg(not(feature = "pathfinder"))]
pub fn rasterize_glyph(&mut self, font: &FontInstance, key: &GlyphKey) -> GlyphRasterResult {
let (x_scale, y_scale) = font.transform.compute_scale().unwrap_or((1.0, 1.0));
let size = font.size.scale_by(y_scale as f32);
let ct_font = match self.get_ct_font(font.font_key, size, &font.variations) {
Some(font) => font,
None => return GlyphRasterResult::LoadFailed,
};
let bitmap = is_bitmap_font(&ct_font);
let (mut shape, (x_offset, y_offset)) = if bitmap {
(FontTransform::identity(), (0.0, 0.0))
} else {
(font.transform.invert_scale(y_scale, y_scale), font.get_subpx_offset(key))
};
if font.flags.contains(FontInstanceFlags::FLIP_X) {
shape = shape.flip_x();
}
if font.flags.contains(FontInstanceFlags::FLIP_Y) {
shape = shape.flip_y();
}
if font.flags.contains(FontInstanceFlags::TRANSPOSE) {
shape = shape.swap_xy();
}
if font.flags.contains(FontInstanceFlags::SYNTHETIC_ITALICS) {
shape = shape.synthesize_italics(OBLIQUE_SKEW_FACTOR);
}
let transform = if !shape.is_identity() {
Some(CGAffineTransform {
a: shape.scale_x as f64,
b: -shape.skew_y as f64,
c: -shape.skew_x as f64,
d: shape.scale_y as f64,
tx: 0.0,
ty: 0.0,
})
} else {
None
};
let glyph = key.index as CGGlyph;
let (strike_scale, pixel_step) = if bitmap { (y_scale, 1.0) } else { (x_scale, y_scale / x_scale) };
let extra_strikes = font.get_extra_strikes(strike | {
let tag: CFNumber = TCFType::wrap_under_get_rule(tag_ptr as CFNumberRef);
if !tag.instance_of::<CFNumber>() {
return ct_font;
}
match tag.to_i64() {
Some(val) => val,
None => return ct_font,
}
} | conditional_block |
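// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, hypothetical — not part of the crate
// above): the variation handling clamps each requested axis value into the
// axis's [min, max] range and only keeps it when it differs from the axis
// default. The same rule, stripped of the CoreText plumbing:
#[derive(Debug, PartialEq)]
struct Axis {
    tag: i64,
    min: f64,
    max: f64,
    default: f64,
}

/// Returns only the (tag, value) pairs that actually need to be applied.
fn effective_variations(axes: &[Axis], requested: &[(i64, f64)]) -> Vec<(i64, f64)> {
    let mut out = Vec::new();
    for &(tag, value) in requested {
        if let Some(axis) = axes.iter().find(|a| a.tag == tag) {
            // Mirrors `val = val.max(min_val).min(max_val)` above.
            let clamped = value.max(axis.min).min(axis.max);
            // Mirrors `if val != def_val { vals.push(...) }` above.
            if clamped != axis.default {
                out.push((tag, clamped));
            }
        }
    }
    out
}
// ---------------------------------------------------------------------------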
options.rs | use std::str::FromStr;
use crate::params_style::ParamStyle;
use crate::rpc_attr::path_eq_str;
const CLIENT_META_WORD: &str = "client";
const SERVER_META_WORD: &str = "server";
const PARAMS_META_KEY: &str = "params";
#[derive(Debug)]
pub struct DeriveOptions {
pub enable_client: bool,
pub enable_server: bool,
pub params_style: ParamStyle,
}
impl DeriveOptions {
pub fn new(enable_client: bool, enable_server: bool, params_style: ParamStyle) -> Self {
DeriveOptions {
enable_client,
enable_server,
params_style,
}
}
pub fn try_from(args: syn::AttributeArgs) -> Result<Self, syn::Error> | syn::Meta::NameValue(nv) => {
if path_eq_str(&nv.path, PARAMS_META_KEY) {
if let syn::Lit::Str(ref lit) = nv.lit {
options.params_style = ParamStyle::from_str(&lit.value())
.map_err(|e| syn::Error::new_spanned(nv.clone(), e))?;
}
} else {
return Err(syn::Error::new_spanned(nv, "Unexpected RPC attribute key"));
}
}
_ => return Err(syn::Error::new_spanned(meta, "Unexpected use of RPC attribute macro")),
}
}
}
if !options.enable_client && !options.enable_server {
// if nothing provided default to both
options.enable_client = true;
options.enable_server = true;
}
if options.enable_server && options.params_style == ParamStyle::Named {
// This is not allowed at this time
panic!("Server code generation only supports `params = \"positional\"` (default) or `params = \"raw\" at this time.")
}
Ok(options)
}
}
| {
let mut options = DeriveOptions::new(false, false, ParamStyle::default());
for arg in args {
if let syn::NestedMeta::Meta(meta) = arg {
match meta {
syn::Meta::Path(ref p) => {
match p
.get_ident()
.ok_or(syn::Error::new_spanned(
p,
format!("Expecting identifier `{}` or `{}`", CLIENT_META_WORD, SERVER_META_WORD),
))?
.to_string()
.as_ref()
{
CLIENT_META_WORD => options.enable_client = true,
SERVER_META_WORD => options.enable_server = true,
_ => {}
};
} | identifier_body |
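// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, hypothetical — not part of the crate
// above): `DeriveOptions::try_from` accepts `client`, `server` and
// `params = "..."` keys, e.g. `#[rpc(client, params = "raw")]`. Two rules in
// the code are easy to miss: omitting both `client` and `server` enables
// both, and server generation rejects named params. The same post-processing
// step with plain types and no `syn`:
#[derive(Debug, Clone, Copy, PartialEq)]
enum Params {
    Positional,
    Named,
    Raw,
}

#[derive(Debug)]
struct Options {
    client: bool,
    server: bool,
    params: Params,
}

fn finalize(mut opts: Options) -> Result<Options, String> {
    if !opts.client && !opts.server {
        // Nothing requested explicitly: generate both, as `try_from` does.
        opts.client = true;
        opts.server = true;
    }
    if opts.server && opts.params == Params::Named {
        // The real macro panics here; an error keeps the sketch testable.
        return Err("server generation only supports positional or raw params".into());
    }
    Ok(opts)
}
// ---------------------------------------------------------------------------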
options.rs | use std::str::FromStr;
use crate::params_style::ParamStyle;
use crate::rpc_attr::path_eq_str;
const CLIENT_META_WORD: &str = "client";
const SERVER_META_WORD: &str = "server";
const PARAMS_META_KEY: &str = "params";
#[derive(Debug)]
pub struct DeriveOptions {
pub enable_client: bool,
pub enable_server: bool,
pub params_style: ParamStyle,
}
impl DeriveOptions {
pub fn new(enable_client: bool, enable_server: bool, params_style: ParamStyle) -> Self {
DeriveOptions {
enable_client,
enable_server,
params_style,
}
}
pub fn try_from(args: syn::AttributeArgs) -> Result<Self, syn::Error> {
let mut options = DeriveOptions::new(false, false, ParamStyle::default());
for arg in args {
if let syn::NestedMeta::Meta(meta) = arg {
match meta {
syn::Meta::Path(ref p) => {
match p
.get_ident()
.ok_or(syn::Error::new_spanned(
p,
format!("Expecting identifier `{}` or `{}`", CLIENT_META_WORD, SERVER_META_WORD),
))?
.to_string()
.as_ref()
{
CLIENT_META_WORD => options.enable_client = true,
SERVER_META_WORD => options.enable_server = true,
_ => {}
};
}
syn::Meta::NameValue(nv) => {
if path_eq_str(&nv.path, PARAMS_META_KEY) {
if let syn::Lit::Str(ref lit) = nv.lit {
options.params_style = ParamStyle::from_str(&lit.value())
.map_err(|e| syn::Error::new_spanned(nv.clone(), e))?; | }
_ => return Err(syn::Error::new_spanned(meta, "Unexpected use of RPC attribute macro")),
}
}
}
if !options.enable_client && !options.enable_server {
// if nothing provided default to both
options.enable_client = true;
options.enable_server = true;
}
if options.enable_server && options.params_style == ParamStyle::Named {
// This is not allowed at this time
panic!("Server code generation only supports `params = \"positional\"` (default) or `params = \"raw\" at this time.")
}
Ok(options)
}
} | }
} else {
return Err(syn::Error::new_spanned(nv, "Unexpected RPC attribute key"));
} | random_line_split |
options.rs | use std::str::FromStr;
use crate::params_style::ParamStyle;
use crate::rpc_attr::path_eq_str;
const CLIENT_META_WORD: &str = "client";
const SERVER_META_WORD: &str = "server";
const PARAMS_META_KEY: &str = "params";
#[derive(Debug)]
pub struct DeriveOptions {
pub enable_client: bool,
pub enable_server: bool,
pub params_style: ParamStyle,
}
impl DeriveOptions {
pub fn new(enable_client: bool, enable_server: bool, params_style: ParamStyle) -> Self {
DeriveOptions {
enable_client,
enable_server,
params_style,
}
}
pub fn try_from(args: syn::AttributeArgs) -> Result<Self, syn::Error> {
let mut options = DeriveOptions::new(false, false, ParamStyle::default());
for arg in args {
if let syn::NestedMeta::Meta(meta) = arg {
match meta {
syn::Meta::Path(ref p) => {
match p
.get_ident()
.ok_or(syn::Error::new_spanned(
p,
format!("Expecting identifier `{}` or `{}`", CLIENT_META_WORD, SERVER_META_WORD),
))?
.to_string()
.as_ref()
{
CLIENT_META_WORD => options.enable_client = true,
SERVER_META_WORD => options.enable_server = true,
_ => {}
};
}
syn::Meta::NameValue(nv) => {
if path_eq_str(&nv.path, PARAMS_META_KEY) {
if let syn::Lit::Str(ref lit) = nv.lit {
options.params_style = ParamStyle::from_str(&lit.value())
.map_err(|e| syn::Error::new_spanned(nv.clone(), e))?;
}
} else {
return Err(syn::Error::new_spanned(nv, "Unexpected RPC attribute key"));
}
}
_ => return Err(syn::Error::new_spanned(meta, "Unexpected use of RPC attribute macro")),
}
}
}
if !options.enable_client && !options.enable_server |
if options.enable_server && options.params_style == ParamStyle::Named {
// This is not allowed at this time
panic!("Server code generation only supports `params = \"positional\"` (default) or `params = \"raw\" at this time.")
}
Ok(options)
}
}
| {
// if nothing provided default to both
options.enable_client = true;
options.enable_server = true;
} | conditional_block |
options.rs | use std::str::FromStr;
use crate::params_style::ParamStyle;
use crate::rpc_attr::path_eq_str;
const CLIENT_META_WORD: &str = "client";
const SERVER_META_WORD: &str = "server";
const PARAMS_META_KEY: &str = "params";
#[derive(Debug)]
pub struct DeriveOptions {
pub enable_client: bool,
pub enable_server: bool,
pub params_style: ParamStyle,
}
impl DeriveOptions {
pub fn | (enable_client: bool, enable_server: bool, params_style: ParamStyle) -> Self {
DeriveOptions {
enable_client,
enable_server,
params_style,
}
}
pub fn try_from(args: syn::AttributeArgs) -> Result<Self, syn::Error> {
let mut options = DeriveOptions::new(false, false, ParamStyle::default());
for arg in args {
if let syn::NestedMeta::Meta(meta) = arg {
match meta {
syn::Meta::Path(ref p) => {
match p
.get_ident()
.ok_or(syn::Error::new_spanned(
p,
format!("Expecting identifier `{}` or `{}`", CLIENT_META_WORD, SERVER_META_WORD),
))?
.to_string()
.as_ref()
{
CLIENT_META_WORD => options.enable_client = true,
SERVER_META_WORD => options.enable_server = true,
_ => {}
};
}
syn::Meta::NameValue(nv) => {
if path_eq_str(&nv.path, PARAMS_META_KEY) {
if let syn::Lit::Str(ref lit) = nv.lit {
options.params_style = ParamStyle::from_str(&lit.value())
.map_err(|e| syn::Error::new_spanned(nv.clone(), e))?;
}
} else {
return Err(syn::Error::new_spanned(nv, "Unexpected RPC attribute key"));
}
}
_ => return Err(syn::Error::new_spanned(meta, "Unexpected use of RPC attribute macro")),
}
}
}
if !options.enable_client && !options.enable_server {
// if nothing provided default to both
options.enable_client = true;
options.enable_server = true;
}
if options.enable_server && options.params_style == ParamStyle::Named {
// This is not allowed at this time
panic!("Server code generation only supports `params = \"positional\"` (default) or `params = \"raw\" at this time.")
}
Ok(options)
}
}
| new | identifier_name |
module.rs | use crate::base::ModuleData;
use rustc_ast::ptr::P;
use rustc_ast::{token, Attribute, Inline, Item};
use rustc_errors::{struct_span_err, DiagnosticBuilder};
use rustc_parse::new_parser_from_file;
use rustc_parse::validate_attr;
use rustc_session::parse::ParseSess;
use rustc_session::Session;
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
use std::path::{self, Path, PathBuf};
#[derive(Copy, Clone)]
pub enum DirOwnership {
Owned {
// None if `mod.rs`, `Some("foo")` if we're in `foo.rs`.
relative: Option<Ident>,
},
UnownedViaBlock,
}
// Public for rustfmt usage.
pub struct ModulePathSuccess {
pub file_path: PathBuf,
pub dir_ownership: DirOwnership,
}
crate struct | {
pub items: Vec<P<Item>>,
pub inner_span: Span,
pub file_path: PathBuf,
pub dir_path: PathBuf,
pub dir_ownership: DirOwnership,
}
pub enum ModError<'a> {
CircularInclusion(Vec<PathBuf>),
ModInBlock(Option<Ident>),
FileNotFound(Ident, PathBuf, PathBuf),
MultipleCandidates(Ident, PathBuf, PathBuf),
ParserError(DiagnosticBuilder<'a>),
}
crate fn parse_external_mod(
sess: &Session,
ident: Ident,
span: Span, // The span to blame on errors.
module: &ModuleData,
mut dir_ownership: DirOwnership,
attrs: &mut Vec<Attribute>,
) -> ParsedExternalMod {
// We bail on the first error, but that error does not cause a fatal error... (1)
let result: Result<_, ModError<'_>> = try {
// Extract the file path and the new ownership.
let mp = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)?;
dir_ownership = mp.dir_ownership;
// Ensure file paths are acyclic.
if let Some(pos) = module.file_path_stack.iter().position(|p| p == &mp.file_path) {
Err(ModError::CircularInclusion(module.file_path_stack[pos..].to_vec()))?;
}
// Actually parse the external file as a module.
let mut parser = new_parser_from_file(&sess.parse_sess, &mp.file_path, Some(span));
let (mut inner_attrs, items, inner_span) =
parser.parse_mod(&token::Eof).map_err(|err| ModError::ParserError(err))?;
attrs.append(&mut inner_attrs);
(items, inner_span, mp.file_path)
};
// (1)...instead, we return a dummy module.
let (items, inner_span, file_path) =
result.map_err(|err| err.report(sess, span)).unwrap_or_default();
// Extract the directory path for submodules of the module.
let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership }
}
crate fn mod_dir_path(
sess: &Session,
ident: Ident,
attrs: &[Attribute],
module: &ModuleData,
mut dir_ownership: DirOwnership,
inline: Inline,
) -> (PathBuf, DirOwnership) {
match inline {
Inline::Yes if let Some(file_path) = mod_file_path_from_attr(sess, attrs, &module.dir_path) => {
// For inline modules file path from `#[path]` is actually the directory path
// for historical reasons, so we don't pop the last segment here.
(file_path, DirOwnership::Owned { relative: None })
}
Inline::Yes => {
// We have to push on the current module name in the case of relative
// paths in order to ensure that any additional module paths from inline
// `mod x { ... }` come after the relative extension.
//
// For example, a `mod z { ... }` inside `x/y.rs` should set the current
// directory path to `/x/y/z`, not `/x/z` with a relative offset of `y`.
let mut dir_path = module.dir_path.clone();
if let DirOwnership::Owned { relative } = &mut dir_ownership {
if let Some(ident) = relative.take() {
// Remove the relative offset.
dir_path.push(&*ident.as_str());
}
}
dir_path.push(&*ident.as_str());
(dir_path, dir_ownership)
}
Inline::No => {
// FIXME: This is a subset of `parse_external_mod` without actual parsing,
// check whether the logic for unloaded, loaded and inline modules can be unified.
let file_path = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)
.map(|mp| {
dir_ownership = mp.dir_ownership;
mp.file_path
})
.unwrap_or_default();
// Extract the directory path for submodules of the module.
let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
(dir_path, dir_ownership)
}
}
}
fn mod_file_path<'a>(
sess: &'a Session,
ident: Ident,
attrs: &[Attribute],
dir_path: &Path,
dir_ownership: DirOwnership,
) -> Result<ModulePathSuccess, ModError<'a>> {
if let Some(file_path) = mod_file_path_from_attr(sess, attrs, dir_path) {
// All `#[path]` files are treated as though they are a `mod.rs` file.
// This means that `mod foo;` declarations inside `#[path]`-included
// files are siblings,
//
// Note that this will produce weirdness when a file named `foo.rs` is
// `#[path]` included and contains a `mod foo;` declaration.
// If you encounter this, it's your own darn fault :P
let dir_ownership = DirOwnership::Owned { relative: None };
return Ok(ModulePathSuccess { file_path, dir_ownership });
}
let relative = match dir_ownership {
DirOwnership::Owned { relative } => relative,
DirOwnership::UnownedViaBlock => None,
};
let result = default_submod_path(&sess.parse_sess, ident, relative, dir_path);
match dir_ownership {
DirOwnership::Owned { .. } => result,
DirOwnership::UnownedViaBlock => Err(ModError::ModInBlock(match result {
Ok(_) | Err(ModError::MultipleCandidates(..)) => Some(ident),
_ => None,
})),
}
}
/// Derive a submodule path from the first found `#[path = "path_string"]`.
/// The provided `dir_path` is joined with the `path_string`.
fn mod_file_path_from_attr(
sess: &Session,
attrs: &[Attribute],
dir_path: &Path,
) -> Option<PathBuf> {
// Extract path string from first `#[path = "path_string"]` attribute.
let first_path = attrs.iter().find(|at| at.has_name(sym::path))?;
let path_string = match first_path.value_str() {
Some(s) => s.as_str(),
None => {
// This check is here mainly to catch attempting to use a macro,
// such as #[path = concat!(...)]. This isn't currently supported
// because otherwise the InvocationCollector would need to defer
// loading a module until the #[path] attribute was expanded, and
// it doesn't support that (and would likely add a bit of
// complexity). Usually bad forms are checked in AstValidator (via
// `check_builtin_attribute`), but by the time that runs the macro
// is expanded, and it doesn't give an error.
validate_attr::emit_fatal_malformed_builtin_attribute(
&sess.parse_sess,
first_path,
sym::path,
);
}
};
// On windows, the base path might have the form
// `\\?\foo\bar` in which case it does not tolerate
// mixed `/` and `\` separators, so canonicalize
// `/` to `\`.
#[cfg(windows)]
let path_string = path_string.replace("/", "\\");
Some(dir_path.join(&*path_string))
}
/// Returns a path to a module.
// Public for rustfmt usage.
pub fn default_submod_path<'a>(
sess: &'a ParseSess,
ident: Ident,
relative: Option<Ident>,
dir_path: &Path,
) -> Result<ModulePathSuccess, ModError<'a>> {
// If we're in a foo.rs file instead of a mod.rs file,
// we need to look for submodules in
// `./foo/<ident>.rs` and `./foo/<ident>/mod.rs` rather than
// `./<ident>.rs` and `./<ident>/mod.rs`.
let relative_prefix_string;
let relative_prefix = if let Some(ident) = relative {
relative_prefix_string = format!("{}{}", ident.name, path::MAIN_SEPARATOR);
&relative_prefix_string
} else {
""
};
let mod_name = ident.name.to_string();
let default_path_str = format!("{}{}.rs", relative_prefix, mod_name);
let secondary_path_str =
format!("{}{}{}mod.rs", relative_prefix, mod_name, path::MAIN_SEPARATOR);
let default_path = dir_path.join(&default_path_str);
let secondary_path = dir_path.join(&secondary_path_str);
let default_exists = sess.source_map().file_exists(&default_path);
let secondary_exists = sess.source_map().file_exists(&secondary_path);
match (default_exists, secondary_exists) {
(true, false) => Ok(ModulePathSuccess {
file_path: default_path,
dir_ownership: DirOwnership::Owned { relative: Some(ident) },
}),
(false, true) => Ok(ModulePathSuccess {
file_path: secondary_path,
dir_ownership: DirOwnership::Owned { relative: None },
}),
(false, false) => Err(ModError::FileNotFound(ident, default_path, secondary_path)),
(true, true) => Err(ModError::MultipleCandidates(ident, default_path, secondary_path)),
}
}
impl ModError<'_> {
fn report(self, sess: &Session, span: Span) {
let diag = &sess.parse_sess.span_diagnostic;
match self {
ModError::CircularInclusion(file_paths) => {
let mut msg = String::from("circular modules: ");
for file_path in &file_paths {
msg.push_str(&file_path.display().to_string());
msg.push_str(" -> ");
}
msg.push_str(&file_paths[0].display().to_string());
diag.struct_span_err(span, &msg)
}
ModError::ModInBlock(ident) => {
let msg = "cannot declare a non-inline module inside a block unless it has a path attribute";
let mut err = diag.struct_span_err(span, msg);
if let Some(ident) = ident {
let note =
format!("maybe `use` the module `{}` instead of redeclaring it", ident);
err.span_note(span, ¬e);
}
err
}
ModError::FileNotFound(ident, default_path, secondary_path) => {
let mut err = struct_span_err!(
diag,
span,
E0583,
"file not found for module `{}`",
ident,
);
err.help(&format!(
"to create the module `{}`, create file \"{}\" or \"{}\"",
ident,
default_path.display(),
secondary_path.display(),
));
err
}
ModError::MultipleCandidates(ident, default_path, secondary_path) => {
let mut err = struct_span_err!(
diag,
span,
E0761,
"file for module `{}` found at both \"{}\" and \"{}\"",
ident,
default_path.display(),
secondary_path.display(),
);
err.help("delete or rename one of them to remove the ambiguity");
err
}
ModError::ParserError(err) => err,
}.emit()
}
}
| ParsedExternalMod | identifier_name |
module.rs | use crate::base::ModuleData;
use rustc_ast::ptr::P;
use rustc_ast::{token, Attribute, Inline, Item};
use rustc_errors::{struct_span_err, DiagnosticBuilder};
use rustc_parse::new_parser_from_file;
use rustc_parse::validate_attr;
use rustc_session::parse::ParseSess;
use rustc_session::Session;
use rustc_span::symbol::{sym, Ident};
use rustc_span::Span;
use std::path::{self, Path, PathBuf};
#[derive(Copy, Clone)]
pub enum DirOwnership {
Owned {
// None if `mod.rs`, `Some("foo")` if we're in `foo.rs`.
relative: Option<Ident>,
},
UnownedViaBlock,
}
// Public for rustfmt usage.
pub struct ModulePathSuccess {
pub file_path: PathBuf,
pub dir_ownership: DirOwnership,
}
crate struct ParsedExternalMod {
pub items: Vec<P<Item>>,
pub inner_span: Span,
pub file_path: PathBuf,
pub dir_path: PathBuf,
pub dir_ownership: DirOwnership,
}
pub enum ModError<'a> {
CircularInclusion(Vec<PathBuf>),
ModInBlock(Option<Ident>),
FileNotFound(Ident, PathBuf, PathBuf),
MultipleCandidates(Ident, PathBuf, PathBuf),
ParserError(DiagnosticBuilder<'a>),
}
crate fn parse_external_mod(
sess: &Session,
ident: Ident,
span: Span, // The span to blame on errors.
module: &ModuleData,
mut dir_ownership: DirOwnership,
attrs: &mut Vec<Attribute>,
) -> ParsedExternalMod {
// We bail on the first error, but that error does not cause a fatal error... (1)
let result: Result<_, ModError<'_>> = try {
// Extract the file path and the new ownership.
let mp = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)?;
dir_ownership = mp.dir_ownership;
// Ensure file paths are acyclic.
if let Some(pos) = module.file_path_stack.iter().position(|p| p == &mp.file_path) {
Err(ModError::CircularInclusion(module.file_path_stack[pos..].to_vec()))?;
}
// Actually parse the external file as a module.
let mut parser = new_parser_from_file(&sess.parse_sess, &mp.file_path, Some(span));
let (mut inner_attrs, items, inner_span) =
parser.parse_mod(&token::Eof).map_err(|err| ModError::ParserError(err))?;
attrs.append(&mut inner_attrs);
(items, inner_span, mp.file_path)
};
// (1)...instead, we return a dummy module.
let (items, inner_span, file_path) =
result.map_err(|err| err.report(sess, span)).unwrap_or_default();
// Extract the directory path for submodules of the module.
let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership }
}
crate fn mod_dir_path(
sess: &Session,
ident: Ident,
attrs: &[Attribute],
module: &ModuleData,
mut dir_ownership: DirOwnership,
inline: Inline,
) -> (PathBuf, DirOwnership) {
match inline {
Inline::Yes if let Some(file_path) = mod_file_path_from_attr(sess, attrs, &module.dir_path) => {
// For inline modules file path from `#[path]` is actually the directory path
// for historical reasons, so we don't pop the last segment here.
(file_path, DirOwnership::Owned { relative: None })
}
Inline::Yes => {
// We have to push on the current module name in the case of relative
// paths in order to ensure that any additional module paths from inline
// `mod x { ... }` come after the relative extension.
//
// For example, a `mod z { ... }` inside `x/y.rs` should set the current
// directory path to `/x/y/z`, not `/x/z` with a relative offset of `y`.
let mut dir_path = module.dir_path.clone();
if let DirOwnership::Owned { relative } = &mut dir_ownership {
if let Some(ident) = relative.take() {
// Remove the relative offset.
dir_path.push(&*ident.as_str());
}
}
dir_path.push(&*ident.as_str());
(dir_path, dir_ownership)
}
Inline::No => {
// FIXME: This is a subset of `parse_external_mod` without actual parsing,
// check whether the logic for unloaded, loaded and inline modules can be unified.
let file_path = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)
.map(|mp| {
dir_ownership = mp.dir_ownership;
mp.file_path
})
.unwrap_or_default();
// Extract the directory path for submodules of the module.
let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
(dir_path, dir_ownership)
}
}
}
fn mod_file_path<'a>(
sess: &'a Session,
ident: Ident,
attrs: &[Attribute],
dir_path: &Path,
dir_ownership: DirOwnership,
) -> Result<ModulePathSuccess, ModError<'a>> {
if let Some(file_path) = mod_file_path_from_attr(sess, attrs, dir_path) { | // Note that this will produce weirdness when a file named `foo.rs` is
// `#[path]` included and contains a `mod foo;` declaration.
// If you encounter this, it's your own darn fault :P
let dir_ownership = DirOwnership::Owned { relative: None };
return Ok(ModulePathSuccess { file_path, dir_ownership });
}
let relative = match dir_ownership {
DirOwnership::Owned { relative } => relative,
DirOwnership::UnownedViaBlock => None,
};
let result = default_submod_path(&sess.parse_sess, ident, relative, dir_path);
match dir_ownership {
DirOwnership::Owned { .. } => result,
DirOwnership::UnownedViaBlock => Err(ModError::ModInBlock(match result {
Ok(_) | Err(ModError::MultipleCandidates(..)) => Some(ident),
_ => None,
})),
}
}
/// Derive a submodule path from the first found `#[path = "path_string"]`.
/// The provided `dir_path` is joined with the `path_string`.
fn mod_file_path_from_attr(
sess: &Session,
attrs: &[Attribute],
dir_path: &Path,
) -> Option<PathBuf> {
// Extract path string from first `#[path = "path_string"]` attribute.
let first_path = attrs.iter().find(|at| at.has_name(sym::path))?;
let path_string = match first_path.value_str() {
Some(s) => s.as_str(),
None => {
// This check is here mainly to catch attempting to use a macro,
// such as #[path = concat!(...)]. This isn't currently supported
// because otherwise the InvocationCollector would need to defer
// loading a module until the #[path] attribute was expanded, and
// it doesn't support that (and would likely add a bit of
// complexity). Usually bad forms are checked in AstValidator (via
// `check_builtin_attribute`), but by the time that runs the macro
// is expanded, and it doesn't give an error.
validate_attr::emit_fatal_malformed_builtin_attribute(
&sess.parse_sess,
first_path,
sym::path,
);
}
};
// On windows, the base path might have the form
// `\\?\foo\bar` in which case it does not tolerate
// mixed `/` and `\` separators, so canonicalize
// `/` to `\`.
#[cfg(windows)]
let path_string = path_string.replace("/", "\\");
Some(dir_path.join(&*path_string))
}
/// Returns a path to a module.
// Public for rustfmt usage.
pub fn default_submod_path<'a>(
sess: &'a ParseSess,
ident: Ident,
relative: Option<Ident>,
dir_path: &Path,
) -> Result<ModulePathSuccess, ModError<'a>> {
// If we're in a foo.rs file instead of a mod.rs file,
// we need to look for submodules in
// `./foo/<ident>.rs` and `./foo/<ident>/mod.rs` rather than
// `./<ident>.rs` and `./<ident>/mod.rs`.
let relative_prefix_string;
let relative_prefix = if let Some(ident) = relative {
relative_prefix_string = format!("{}{}", ident.name, path::MAIN_SEPARATOR);
&relative_prefix_string
} else {
""
};
let mod_name = ident.name.to_string();
let default_path_str = format!("{}{}.rs", relative_prefix, mod_name);
let secondary_path_str =
format!("{}{}{}mod.rs", relative_prefix, mod_name, path::MAIN_SEPARATOR);
let default_path = dir_path.join(&default_path_str);
let secondary_path = dir_path.join(&secondary_path_str);
let default_exists = sess.source_map().file_exists(&default_path);
let secondary_exists = sess.source_map().file_exists(&secondary_path);
match (default_exists, secondary_exists) {
(true, false) => Ok(ModulePathSuccess {
file_path: default_path,
dir_ownership: DirOwnership::Owned { relative: Some(ident) },
}),
(false, true) => Ok(ModulePathSuccess {
file_path: secondary_path,
dir_ownership: DirOwnership::Owned { relative: None },
}),
(false, false) => Err(ModError::FileNotFound(ident, default_path, secondary_path)),
(true, true) => Err(ModError::MultipleCandidates(ident, default_path, secondary_path)),
}
}
impl ModError<'_> {
fn report(self, sess: &Session, span: Span) {
let diag = &sess.parse_sess.span_diagnostic;
match self {
ModError::CircularInclusion(file_paths) => {
let mut msg = String::from("circular modules: ");
for file_path in &file_paths {
msg.push_str(&file_path.display().to_string());
msg.push_str(" -> ");
}
msg.push_str(&file_paths[0].display().to_string());
diag.struct_span_err(span, &msg)
}
ModError::ModInBlock(ident) => {
let msg = "cannot declare a non-inline module inside a block unless it has a path attribute";
let mut err = diag.struct_span_err(span, msg);
if let Some(ident) = ident {
let note =
format!("maybe `use` the module `{}` instead of redeclaring it", ident);
err.span_note(span, ¬e);
}
err
}
ModError::FileNotFound(ident, default_path, secondary_path) => {
let mut err = struct_span_err!(
diag,
span,
E0583,
"file not found for module `{}`",
ident,
);
err.help(&format!(
"to create the module `{}`, create file \"{}\" or \"{}\"",
ident,
default_path.display(),
secondary_path.display(),
));
err
}
ModError::MultipleCandidates(ident, default_path, secondary_path) => {
let mut err = struct_span_err!(
diag,
span,
E0761,
"file for module `{}` found at both \"{}\" and \"{}\"",
ident,
default_path.display(),
secondary_path.display(),
);
err.help("delete or rename one of them to remove the ambiguity");
err
}
ModError::ParserError(err) => err,
}.emit()
}
} | // All `#[path]` files are treated as though they are a `mod.rs` file.
// This means that `mod foo;` declarations inside `#[path]`-included
// files are siblings,
// | random_line_split |
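// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, hypothetical — not part of rustc):
// `default_submod_path` above builds two candidate paths for `mod <ident>;`,
// `<dir>/[<relative>/]<ident>.rs` and `<dir>/[<relative>/]<ident>/mod.rs`,
// then errors if neither or both exist. The candidate construction on its
// own, without the session or source-map existence checks:
use std::path::{Path, PathBuf};

fn submodule_candidates(dir: &Path, relative: Option<&str>, ident: &str) -> (PathBuf, PathBuf) {
    // When the parent module lives in `foo.rs` rather than `mod.rs`, its
    // submodules are looked up under a `foo/` directory — that is the
    // optional `relative` prefix.
    let mut base = dir.to_path_buf();
    if let Some(rel) = relative {
        base.push(rel);
    }
    let default_path = base.join(format!("{}.rs", ident));
    let secondary_path = base.join(ident).join("mod.rs");
    (default_path, secondary_path)
}

// For `mod bar;` declared in `src/foo.rs`, this yields
// (`src/foo/bar.rs`, `src/foo/bar/mod.rs`).
// ---------------------------------------------------------------------------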
fields.rs | //! Defines the fields used in the query result structs
// don't show deprecation warnings about NewField when we build this file
#![allow(deprecated)]
use crate::prelude::*;
use serde::Deserialize;
use std::path::PathBuf;
/// This trait is used to furnish the caller with the watchman
/// field name for an entry in the file results
#[doc(hidden)]
pub trait QueryFieldName {
fn field_name() -> &'static str;
}
/// This trait is used to produce the complete list of file
/// result field names for a query
#[doc(hidden)]
pub trait QueryFieldList {
fn field_list() -> Vec<&'static str>;
}
/// This macro defines a field struct that can be composed using
/// the `query_result_type!` macro into a struct that can be used
/// with the `Client::query` method.
macro_rules! define_field {(
$(#[$meta:meta])*
$tyname:ident, $ty:ty, $field_name:literal) => {
#[derive(Deserialize, Clone, Debug)]
$(#[$meta])*
pub struct $tyname {
#[serde(rename = $field_name)]
val: $ty,
}
impl QueryFieldName for $tyname {
fn field_name() -> &'static str {
$field_name
}
}
impl $tyname {
/// Consumes the field and returns the underlying
/// value storage
pub fn into_inner(self) -> $ty {
self.val
}
}
impl std::ops::Deref for $tyname {
type Target = $ty;
fn deref(&self) -> &Self::Target {
&self.val
}
}
impl std::ops::DerefMut for $tyname {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.val
}
}
};
}
define_field!(
/// The field corresponding to the `name` of the file.
NameField,
PathBuf,
"name"
);
define_field!(
/// The field corresponding to the `exists` status of the file
ExistsField,
bool,
"exists"
);
define_field!(
/// The field corresponding to the `cclock` field.
/// the cclock is the created clock; the clock value when we first observed the file,
/// or the clock value when it last switched from !exists to exists.
CreatedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `oclock` field.
/// the oclock is the observed clock; the clock value where we last observed some
/// change in this file or its metadata.
ObservedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `content.sha1hex` field.
/// For regular files this evaluates to the sha1 hash of the
/// file contents.
ContentSha1HexField,
ContentSha1Hex,
"content.sha1hex"
);
define_field!(
/// The field corresponding to the `ctime` field.
/// ctime is the last inode change time measured in integer seconds since the
/// unix epoch.
CTimeField,
i64,
"ctime"
);
define_field!(
/// The field corresponding to the `ctime_f` field.
/// ctime is the last inode change time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
CTimeAsFloatField,
f32,
"ctime_f"
);
define_field!(
/// The field corresponding to the `mtime` field.
/// mtime is the last modified time measured in integer seconds
/// since the unix epoch.
MTimeField,
i64,
"mtime"
);
define_field!(
/// The field corresponding to the `mtime_f` field.
/// mtime is the last modified time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
MTimeAsFloatField,
f32,
"mtime_f"
);
define_field!(
/// The field corresponding to the `size` field.
/// This represents the size of the file in bytes.
SizeField,
usize,
"size"
);
define_field!(
/// The field corresponding to the `mode` field.
/// This encodes the full file type and permission bits.
/// Note that most programs and users are more comfortable with
/// this value when printed in octal.
/// It is recommended to use `FileTypeField` if all you need is the
/// file type and not the permission bits, as it is cheaper to
/// determine just the type in a virtualized filesystem.
ModeAndPermissionsField,
u64,
"mode"
);
define_field!( | "uid"
);
define_field!(
/// The field corresponding to the `gid` field.
/// The gid field is the owning gid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerGidField,
u32,
"gid"
);
define_field!(
/// The field corresponding to the `ino` field.
/// The ino field is the inode number expressed as an integer.
/// This field is not meaningful on Windows.
InodeNumberField,
u64,
"ino"
);
define_field!(
/// The field corresponding to the `dev` field.
/// The dev field is the device number expressed as an integer.
/// This field is not meaningful on Windows.
DeviceNumberField,
u64,
"dev"
);
define_field!(
/// The field corresponding to the `nlink` field.
/// The nlink field is the number of hard links to the file
/// expressed as an integer.
NumberOfLinksField,
u64,
"nlink"
);
define_field!(
/// The field corresponding to the `type` field.
/// The type field encodes the type of the file.
FileTypeField,
FileType,
"type"
);
define_field!(
/// The field corresponding to the `symlink_target` field.
/// For files of type symlink this evaluates to the result
/// of readlink(2) on the file.
SymlinkTargetField,
Option<String>,
"symlink_target"
);
define_field!(
/// The field corresponding to the `new` field.
/// The new field evaluates to true if a file is newer than
/// the since generator criteria.
///
/// Use of this field is discouraged as there are a number of
/// situations in which the newness has a counter-intuitive
/// value. In addition, computing newness in a virtualized
/// filesystem is relatively expensive.
///
/// If your application needs to reason about the transition
/// from `!exists -> exists` then you should track the
/// `ExistsField` in your application.
#[deprecated(note = "NewField can have counter-intuitive \
values in a number of situations so it \
is recommended that you track \
ExistsField instead")]
NewField,
bool,
"new"
);
/// A macro to help define a type to hold file information from
/// a query.
/// This macro enables a type-safe way to define the set of fields
/// to be returned and de-serialize only those fields.
///
/// This defines a struct that will receive the name and content
/// hash fields from the results. When used together with
/// `Client::query`, the query will automatically use the appropriate
/// list of field names:
///
/// ```
/// use watchman_client::prelude::*;
/// use serde::Deserialize;
///
/// query_result_type! {
/// struct NameAndHash {
/// name: NameField,
/// hash: ContentSha1HexField,
/// }
/// }
/// ```
///
/// The struct must consist of 2 or more fields; the macro subsystem
/// won't allow for generating an appropriate type definition for a single
/// field result.
///
/// If you need only a single field, look at [NameOnly](struct.NameOnly.html).
///
/// The field types must implement an undocumented trait that enables
/// the automatic field naming and correct deserialization regardless
/// of the field name in the struct. As such, you should consider
/// the set of fields to be limited to those provided by this crate.
#[macro_export]
macro_rules! query_result_type {(
$struct_vis:vis struct $tyname:ident {
$($field_vis:vis $field_name:ident : $field_ty:ty),+ $(,)?
}
) => (
#[derive(Deserialize, Debug, Clone)]
$struct_vis struct $tyname {
$(
#[serde(flatten)]
$field_vis $field_name: $field_ty,
)*
}
impl QueryFieldList for $tyname {
fn field_list() -> Vec <&'static str> {
vec![
$(
<$field_ty>::field_name(),
)*
]
}
}
)
}
/// Use the `NameOnly` struct when your desired field list in your
/// query results consist only of the name field.
/// It is not possible to use the `query_result_type!` macro to define
/// an appropriate type due to limitations in the Rust macro system.
#[derive(Deserialize, Debug, Clone)]
#[serde(from = "PathBuf")]
pub struct NameOnly {
pub name: NameField,
}
impl QueryFieldList for NameOnly {
fn field_list() -> Vec<&'static str> {
vec!["name"]
}
}
impl From<PathBuf> for NameOnly {
fn from(path: PathBuf) -> Self {
Self {
name: NameField { val: path },
}
}
} | /// The field corresponding to the `uid` field.
/// The uid field is the owning uid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerUidField,
u32, | random_line_split |
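// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition — assumes the `watchman_client`
// prelude re-exports the field types and macro defined above): the point of
// the typed fields is that the list of watchman fields requested by a query
// is derived from the result struct itself via `field_list()`.
use serde::Deserialize;
use watchman_client::prelude::*;

query_result_type! {
    struct NameSizeAndType {
        name: NameField,
        size: SizeField,
        file_type: FileTypeField,
    }
}

fn main() {
    // `field_list()` is what a client sends as the query's "fields" term.
    assert_eq!(NameSizeAndType::field_list(), vec!["name", "size", "type"]);
}
// ---------------------------------------------------------------------------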
fields.rs | //! Defines the fields used in the query result structs
// don't show deprecation warnings about NewField when we build this file
#![allow(deprecated)]
use crate::prelude::*;
use serde::Deserialize;
use std::path::PathBuf;
/// This trait is used to furnish the caller with the watchman
/// field name for an entry in the file results
#[doc(hidden)]
pub trait QueryFieldName {
fn field_name() -> &'static str;
}
/// This trait is used to produce the complete list of file
/// result field names for a query
#[doc(hidden)]
pub trait QueryFieldList {
fn field_list() -> Vec<&'static str>;
}
/// This macro defines a field struct that can be composed using
/// the `query_result_type!` macro into a struct that can be used
/// with the `Client::query` method.
macro_rules! define_field {(
$(#[$meta:meta])*
$tyname:ident, $ty:ty, $field_name:literal) => {
#[derive(Deserialize, Clone, Debug)]
$(#[$meta])*
pub struct $tyname {
#[serde(rename = $field_name)]
val: $ty,
}
impl QueryFieldName for $tyname {
fn field_name() -> &'static str {
$field_name
}
}
impl $tyname {
/// Consumes the field and returns the underlying
/// value storage
pub fn into_inner(self) -> $ty {
self.val
}
}
impl std::ops::Deref for $tyname {
type Target = $ty;
fn deref(&self) -> &Self::Target {
&self.val
}
}
impl std::ops::DerefMut for $tyname {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.val
}
}
};
}
define_field!(
/// The field corresponding to the `name` of the file.
NameField,
PathBuf,
"name"
);
define_field!(
/// The field corresponding to the `exists` status of the file
ExistsField,
bool,
"exists"
);
define_field!(
/// The field corresponding to the `cclock` field.
/// the cclock is the created clock; the clock value when we first observed the file,
/// or the clock value when it last switched from !exists to exists.
CreatedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `oclock` field.
/// the oclock is the observed clock; the clock value where we last observed some
/// change in this file or its metadata.
ObservedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `content.sha1hex` field.
/// For regular files this evaluates to the sha1 hash of the
/// file contents.
ContentSha1HexField,
ContentSha1Hex,
"content.sha1hex"
);
define_field!(
/// The field corresponding to the `ctime` field.
/// ctime is the last inode change time measured in integer seconds since the
/// unix epoch.
CTimeField,
i64,
"ctime"
);
define_field!(
/// The field corresponding to the `ctime_f` field.
/// ctime is the last inode change time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
CTimeAsFloatField,
f32,
"ctime_f"
);
define_field!(
/// The field corresponding to the `mtime` field.
/// mtime is the last modified time measured in integer seconds
/// since the unix epoch.
MTimeField,
i64,
"mtime"
);
define_field!(
/// The field corresponding to the `mtime_f` field.
/// mtime is the last modified time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
MTimeAsFloatField,
f32,
"mtime_f"
);
define_field!(
/// The field corresponding to the `size` field.
/// This represents the size of the file in bytes.
SizeField,
usize,
"size"
);
define_field!(
/// The field corresponding to the `mode` field.
/// This encodes the full file type and permission bits.
/// Note that most programs and users are more comfortable with
/// this value when printed in octal.
/// It is recommended to use `FileTypeField` if all you need is the
/// file type and not the permission bits, as it is cheaper to
/// determine just the type in a virtualized filesystem.
ModeAndPermissionsField,
u64,
"mode"
);
define_field!(
/// The field corresponding to the `uid` field.
/// The uid field is the owning uid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerUidField,
u32,
"uid"
);
define_field!(
/// The field corresponding to the `gid` field.
/// The gid field is the owning gid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerGidField,
u32,
"gid"
);
define_field!(
/// The field corresponding to the `ino` field.
/// The ino field is the inode number expressed as an integer.
/// This field is not meaningful on Windows.
InodeNumberField,
u64,
"ino"
);
define_field!(
/// The field corresponding to the `dev` field.
/// The dev field is the device number expressed as an integer.
/// This field is not meaningful on Windows.
DeviceNumberField,
u64,
"dev"
);
define_field!(
/// The field corresponding to the `nlink` field.
/// The nlink field is the number of hard links to the file
/// expressed as an integer.
NumberOfLinksField,
u64,
"nlink"
);
define_field!(
/// The field corresponding to the `type` field.
/// The type field encodes the type of the file.
FileTypeField,
FileType,
"type"
);
define_field!(
/// The field corresponding to the `symlink_target` field.
/// For files of type symlink this evaluates to the result
/// of readlink(2) on the file.
SymlinkTargetField,
Option<String>,
"symlink_target"
);
define_field!(
/// The field corresponding to the `new` field.
/// The new field evaluates to true if a file is newer than
/// the since generator criteria.
///
/// Use of this field is discouraged as there are a number of
/// situations in which the newness has a counter-intuitive
/// value. In addition, computing newness in a virtualized
/// filesystem is relatively expensive.
///
/// If your application needs to reason about the transition
/// from `!exists -> exists` then you should track the
/// `ExistsField` in your application.
#[deprecated(note = "NewField can have counter-intuitive \
values in a number of situations so it \
is recommended that you track \
ExistsField instead")]
NewField,
bool,
"new"
);
/// A macro to help define a type to hold file information from
/// a query.
/// This macro enables a type-safe way to define the set of fields
/// to be returned and de-serialize only those fields.
///
/// This defines a struct that will receive the name and content
/// hash fields from the results. When used together with
/// `Client::query`, the query will automatically use the appropriate
/// list of field names:
///
/// ```
/// use watchman_client::prelude::*;
/// use serde::Deserialize;
///
/// query_result_type! {
/// struct NameAndHash {
/// name: NameField,
/// hash: ContentSha1HexField,
/// }
/// }
/// ```
///
/// The struct must consist of 2 or more fields; the macro subsystem
/// won't allow for generating an appropriate type definition for a single
/// field result.
///
/// If you need only a single field, look at [NameOnly](struct.NameOnly.html).
///
/// The field types must implement an undocumented trait that enables
/// the automatic field naming and correct deserialization regardless
/// of the field name in the struct. As such, you should consider
/// the set of fields to be limited to those provided by this crate.
#[macro_export]
macro_rules! query_result_type {(
$struct_vis:vis struct $tyname:ident {
$($field_vis:vis $field_name:ident : $field_ty:ty),+ $(,)?
}
) => (
#[derive(Deserialize, Debug, Clone)]
$struct_vis struct $tyname {
$(
#[serde(flatten)]
$field_vis $field_name: $field_ty,
)*
}
impl QueryFieldList for $tyname {
fn field_list() -> Vec <&'static str> {
vec![
$(
<$field_ty>::field_name(),
)*
]
}
}
)
}
/// Use the `NameOnly` struct when your desired field list in your
/// query results consist only of the name field.
/// It is not possible to use the `query_result_type!` macro to define
/// an appropriate type due to limitations in the Rust macro system.
#[derive(Deserialize, Debug, Clone)]
#[serde(from = "PathBuf")]
pub struct | {
pub name: NameField,
}
impl QueryFieldList for NameOnly {
fn field_list() -> Vec<&'static str> {
vec!["name"]
}
}
impl From<PathBuf> for NameOnly {
fn from(path: PathBuf) -> Self {
Self {
name: NameField { val: path },
}
}
}
| NameOnly | identifier_name |
fields.rs | //! Defines the fields used in the query result structs
// don't show deprecation warnings about NewField when we build this file
#![allow(deprecated)]
use crate::prelude::*;
use serde::Deserialize;
use std::path::PathBuf;
/// This trait is used to furnish the caller with the watchman
/// field name for an entry in the file results
#[doc(hidden)]
pub trait QueryFieldName {
fn field_name() -> &'static str;
}
/// This trait is used to produce the complete list of file
/// result field names for a query
#[doc(hidden)]
pub trait QueryFieldList {
fn field_list() -> Vec<&'static str>;
}
/// This macro defines a field struct that can be composed using
/// the `query_result_type!` macro into a struct that can be used
/// with the `Client::query` method.
macro_rules! define_field {(
$(#[$meta:meta])*
$tyname:ident, $ty:ty, $field_name:literal) => {
#[derive(Deserialize, Clone, Debug)]
$(#[$meta])*
pub struct $tyname {
#[serde(rename = $field_name)]
val: $ty,
}
impl QueryFieldName for $tyname {
fn field_name() -> &'static str {
$field_name
}
}
impl $tyname {
/// Consumes the field and returns the underlying
/// value storage
pub fn into_inner(self) -> $ty {
self.val
}
}
impl std::ops::Deref for $tyname {
type Target = $ty;
fn deref(&self) -> &Self::Target {
&self.val
}
}
impl std::ops::DerefMut for $tyname {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.val
}
}
};
}
define_field!(
/// The field corresponding to the `name` of the file.
NameField,
PathBuf,
"name"
);
define_field!(
/// The field corresponding to the `exists` status of the file
ExistsField,
bool,
"exists"
);
define_field!(
/// The field corresponding to the `cclock` field.
/// the cclock is the created clock; the clock value when we first observed the file,
/// or the clock value when it last switched from !exists to exists.
CreatedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `oclock` field.
/// the oclock is the observed clock; the clock value where we last observed some
/// change in this file or its metadata.
ObservedClockField,
ClockSpec,
"cclock"
);
define_field!(
/// The field corresponding to the `content.sha1hex` field.
/// For regular files this evaluates to the sha1 hash of the
/// file contents.
ContentSha1HexField,
ContentSha1Hex,
"content.sha1hex"
);
define_field!(
/// The field corresponding to the `ctime` field.
/// ctime is the last inode change time measured in integer seconds since the
/// unix epoch.
CTimeField,
i64,
"ctime"
);
define_field!(
/// The field corresponding to the `ctime_f` field.
/// ctime is the last inode change time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
CTimeAsFloatField,
f32,
"ctime_f"
);
define_field!(
/// The field corresponding to the `mtime` field.
/// mtime is the last modified time measured in integer seconds
/// since the unix epoch.
MTimeField,
i64,
"mtime"
);
define_field!(
/// The field corresponding to the `mtime_f` field.
/// mtime is the last modified time measured in floating point seconds
/// (including the fractional portion) since the unix epoch.
MTimeAsFloatField,
f32,
"mtime_f"
);
define_field!(
/// The field corresponding to the `size` field.
/// This represents the size of the file in bytes.
SizeField,
usize,
"size"
);
define_field!(
/// The field corresponding to the `mode` field.
/// This encodes the full file type and permission bits.
/// Note that most programs and users are more comfortable with
/// this value when printed in octal.
/// It is recommended to use `FileTypeField` if all you need is the
/// file type and not the permission bits, as it is cheaper to
/// determine just the type in a virtualized filesystem.
ModeAndPermissionsField,
u64,
"mode"
);
define_field!(
/// The field corresponding to the `uid` field.
/// The uid field is the owning uid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerUidField,
u32,
"uid"
);
define_field!(
/// The field corresponding to the `gid` field.
/// The gid field is the owning gid expressed as an integer.
/// This field is not meaningful on Windows.
OwnerGidField,
u32,
"gid"
);
define_field!(
/// The field corresponding to the `ino` field.
/// The ino field is the inode number expressed as an integer.
/// This field is not meaningful on Windows.
InodeNumberField,
u64,
"ino"
);
define_field!(
/// The field corresponding to the `dev` field.
/// The dev field is the device number expressed as an integer.
/// This field is not meaningful on Windows.
DeviceNumberField,
u64,
"dev"
);
define_field!(
/// The field corresponding to the `nlink` field.
/// The nlink field is the number of hard links to the file
/// expressed as an integer.
NumberOfLinksField,
u64,
"nlink"
);
define_field!(
/// The field corresponding to the `type` field.
/// The type field encodes the type of the file.
FileTypeField,
FileType,
"type"
);
define_field!(
/// The field corresponding to the `symlink_target` field.
/// For files of type symlink this evaluates to the result
/// of readlink(2) on the file.
SymlinkTargetField,
Option<String>,
"symlink_target"
);
define_field!(
/// The field corresponding to the `new` field.
/// The new field evaluates to true if a file is newer than
/// the since generator criteria.
///
/// Use of this field is discouraged as there are a number of
/// situations in which the newness has a counter-intuitive
/// value. In addition, computing newness in a virtualized
/// filesystem is relatively expensive.
///
/// If your application needs to reason about the transition
/// from `!exists -> exists` then you should track the
/// `ExistsField` in your application.
#[deprecated(note = "NewField can have counter-intuitive \
values in a number of situations so it \
is recommended that you track \
ExistsField instead")]
NewField,
bool,
"new"
);
/// A macro to help define a type to hold file information from
/// a query.
/// This macro enables a type-safe way to define the set of fields
/// to be returned and de-serialize only those fields.
///
/// This defines a struct that will receive the name and content
/// hash fields from the results. When used together with
/// `Client::query`, the query will automatically use the appropriate
/// list of field names:
///
/// ```
/// use watchman_client::prelude::*;
/// use serde::Deserialize;
///
/// query_result_type! {
/// struct NameAndHash {
/// name: NameField,
/// hash: ContentSha1HexField,
/// }
/// }
/// ```
///
/// The struct must consist of 2 or more fields; the macro subsystem
/// won't allow for generating an appropriate type definition for a single
/// field result.
///
/// If you need only a single field, look at [NameOnly](struct.NameOnly.html).
///
/// The field types must implement an undocumented trait that enables
/// the automatic field naming and correct deserialization regardless
/// of the field name in the struct. As such, you should consider
/// the set of fields to be limited to those provided by this crate.
#[macro_export]
macro_rules! query_result_type {(
$struct_vis:vis struct $tyname:ident {
$($field_vis:vis $field_name:ident : $field_ty:ty),+ $(,)?
}
) => (
#[derive(Deserialize, Debug, Clone)]
$struct_vis struct $tyname {
$(
#[serde(flatten)]
$field_vis $field_name: $field_ty,
)*
}
impl QueryFieldList for $tyname {
fn field_list() -> Vec <&'static str> {
vec![
$(
<$field_ty>::field_name(),
)*
]
}
}
)
}
/// Use the `NameOnly` struct when your desired field list in your
/// query results consist only of the name field.
/// It is not possible to use the `query_result_type!` macro to define
/// an appropriate type due to limitations in the Rust macro system.
#[derive(Deserialize, Debug, Clone)]
#[serde(from = "PathBuf")]
pub struct NameOnly {
pub name: NameField,
}
impl QueryFieldList for NameOnly {
fn field_list() -> Vec<&'static str> {
vec!["name"]
}
}
impl From<PathBuf> for NameOnly {
fn from(path: PathBuf) -> Self |
}
| {
Self {
name: NameField { val: path },
}
} | identifier_body |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::column::ColumnsView;
use crate::elements::{FadeStyle, PuzzleCmd, PuzzleCore, PuzzleView};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Game, PuzzleState, WhatchaState};
// ========================================================================= //
pub struct View {
core: PuzzleCore<(usize, i32)>,
columns: ColumnsView,
show_columns: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &WhatchaState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::TopToBottom);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
columns: ColumnsView::new(resources, 278, 108, 0),
show_columns: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.whatcha_column;
self.core.draw_back_layer(canvas);
if self.show_columns {
self.columns.draw(state.columns(), canvas);
}
self.core.draw_middle_layer(canvas);
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.whatcha_column;
let mut action = self.core.handle_event(event, state);
if!action.should_stop()
&& self.show_columns
&& (event == &Event::ClockTick ||!state.is_solved())
{
let subaction =
self.columns.handle_event(event, state.columns_mut());
if let Some(&(col, by)) = subaction.value() {
state.rotate_column(col, by);
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
} else {
self.core.push_undo((col, by));
}
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.whatcha_column.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_undo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, -by);
}
}
fn redo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_redo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, by);
}
}
fn reset(&mut self, game: &mut Game) {
self.columns.clear_drag();
self.core.clear_undo_redo();
game.whatcha_column.reset();
}
fn solve(&mut self, game: &mut Game) {
self.columns.clear_drag();
game.whatcha_column.solve();
self.core.begin_outro_scene();
}
fn | (&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_columns = value!= 0;
} else if kind == 1 && value >= 0 {
self.columns
.set_hilight_color(value as usize, (255, 128, 255));
} else if kind == 2 && value >= 0 {
self.columns.clear_hilight_color(value as usize);
}
}
}
}
// ========================================================================= //
const INFO_BOX_TEXT: &str = "\
Your goal is to slide the columns of letters until the
highlighted letters form a word horizontally across.
There is only one possible word that can be formed.
Drag a column up or down with $M{your finger}{the mouse} to rotate
its letters. Moving one column may also cause other
columns to move at the same time.
$M{Tap}{Click} on a character in the scene to hear their words
of wisdom.";
// ========================================================================= //
| drain_queue | identifier_name |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::column::ColumnsView;
use crate::elements::{FadeStyle, PuzzleCmd, PuzzleCore, PuzzleView};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Game, PuzzleState, WhatchaState};
// ========================================================================= //
pub struct View {
core: PuzzleCore<(usize, i32)>,
columns: ColumnsView,
show_columns: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &WhatchaState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::TopToBottom);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
columns: ColumnsView::new(resources, 278, 108, 0),
show_columns: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.whatcha_column;
self.core.draw_back_layer(canvas);
if self.show_columns {
self.columns.draw(state.columns(), canvas);
}
self.core.draw_middle_layer(canvas);
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.whatcha_column;
let mut action = self.core.handle_event(event, state);
if!action.should_stop()
&& self.show_columns
&& (event == &Event::ClockTick ||!state.is_solved())
{
let subaction =
self.columns.handle_event(event, state.columns_mut());
if let Some(&(col, by)) = subaction.value() {
state.rotate_column(col, by);
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
} else |
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.whatcha_column.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_undo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, -by);
}
}
fn redo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_redo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, by);
}
}
fn reset(&mut self, game: &mut Game) {
self.columns.clear_drag();
self.core.clear_undo_redo();
game.whatcha_column.reset();
}
fn solve(&mut self, game: &mut Game) {
self.columns.clear_drag();
game.whatcha_column.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_columns = value!= 0;
} else if kind == 1 && value >= 0 {
self.columns
.set_hilight_color(value as usize, (255, 128, 255));
} else if kind == 2 && value >= 0 {
self.columns.clear_hilight_color(value as usize);
}
}
}
}
// ========================================================================= //
const INFO_BOX_TEXT: &str = "\
Your goal is to slide the columns of letters until the
highlighted letters form a word horizontally across.
There is only one possible word that can be formed.
Drag a column up or down with $M{your finger}{the mouse} to rotate
its letters. Moving one column may also cause other
columns to move at the same time.
$M{Tap}{Click} on a character in the scene to hear their words
of wisdom.";
// ========================================================================= //
| {
self.core.push_undo((col, by));
} | conditional_block |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::column::ColumnsView;
use crate::elements::{FadeStyle, PuzzleCmd, PuzzleCore, PuzzleView};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Game, PuzzleState, WhatchaState};
// ========================================================================= //
pub struct View {
core: PuzzleCore<(usize, i32)>,
columns: ColumnsView,
show_columns: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &WhatchaState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::TopToBottom);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
columns: ColumnsView::new(resources, 278, 108, 0),
show_columns: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.whatcha_column;
self.core.draw_back_layer(canvas);
if self.show_columns {
self.columns.draw(state.columns(), canvas); | }
self.core.draw_middle_layer(canvas);
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.whatcha_column;
let mut action = self.core.handle_event(event, state);
if!action.should_stop()
&& self.show_columns
&& (event == &Event::ClockTick ||!state.is_solved())
{
let subaction =
self.columns.handle_event(event, state.columns_mut());
if let Some(&(col, by)) = subaction.value() {
state.rotate_column(col, by);
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
} else {
self.core.push_undo((col, by));
}
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.whatcha_column.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_undo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, -by);
}
}
fn redo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_redo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, by);
}
}
fn reset(&mut self, game: &mut Game) {
self.columns.clear_drag();
self.core.clear_undo_redo();
game.whatcha_column.reset();
}
fn solve(&mut self, game: &mut Game) {
self.columns.clear_drag();
game.whatcha_column.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_columns = value!= 0;
} else if kind == 1 && value >= 0 {
self.columns
.set_hilight_color(value as usize, (255, 128, 255));
} else if kind == 2 && value >= 0 {
self.columns.clear_hilight_color(value as usize);
}
}
}
}
// ========================================================================= //
const INFO_BOX_TEXT: &str = "\
Your goal is to slide the columns of letters until the
highlighted letters form a word horizontally across.
There is only one possible word that can be formed.
Drag a column up or down with $M{your finger}{the mouse} to rotate
its letters. Moving one column may also cause other
columns to move at the same time.
$M{Tap}{Click} on a character in the scene to hear their words
of wisdom.";
// ========================================================================= // | random_line_split |
|
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::column::ColumnsView;
use crate::elements::{FadeStyle, PuzzleCmd, PuzzleCore, PuzzleView};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Game, PuzzleState, WhatchaState};
// ========================================================================= //
pub struct View {
core: PuzzleCore<(usize, i32)>,
columns: ColumnsView,
show_columns: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &WhatchaState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::TopToBottom);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
columns: ColumnsView::new(resources, 278, 108, 0),
show_columns: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.whatcha_column;
self.core.draw_back_layer(canvas);
if self.show_columns {
self.columns.draw(state.columns(), canvas);
}
self.core.draw_middle_layer(canvas);
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.whatcha_column;
let mut action = self.core.handle_event(event, state);
if!action.should_stop()
&& self.show_columns
&& (event == &Event::ClockTick ||!state.is_solved())
{
let subaction =
self.columns.handle_event(event, state.columns_mut());
if let Some(&(col, by)) = subaction.value() {
state.rotate_column(col, by);
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
} else {
self.core.push_undo((col, by));
}
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.whatcha_column.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_undo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, -by);
}
}
fn redo(&mut self, game: &mut Game) {
if let Some((col, by)) = self.core.pop_redo() {
self.columns.clear_drag();
game.whatcha_column.rotate_column(col, by);
}
}
fn reset(&mut self, game: &mut Game) |
fn solve(&mut self, game: &mut Game) {
self.columns.clear_drag();
game.whatcha_column.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_columns = value!= 0;
} else if kind == 1 && value >= 0 {
self.columns
.set_hilight_color(value as usize, (255, 128, 255));
} else if kind == 2 && value >= 0 {
self.columns.clear_hilight_color(value as usize);
}
}
}
}
// ========================================================================= //
const INFO_BOX_TEXT: &str = "\
Your goal is to slide the columns of letters until the
highlighted letters form a word horizontally across.
There is only one possible word that can be formed.
Drag a column up or down with $M{your finger}{the mouse} to rotate
its letters. Moving one column may also cause other
columns to move at the same time.
$M{Tap}{Click} on a character in the scene to hear their words
of wisdom.";
// ========================================================================= //
| {
self.columns.clear_drag();
self.core.clear_undo_redo();
game.whatcha_column.reset();
} | identifier_body |
multidispatch2.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Debug; |
trait MyTrait<T> {
fn get(&self) -> T;
}
impl<T> MyTrait<T> for T
where T : Default
{
fn get(&self) -> T {
Default::default()
}
}
#[derive(Copy)]
struct MyType {
dummy: uint
}
impl MyTrait<uint> for MyType {
fn get(&self) -> uint { self.dummy }
}
fn test_eq<T,M>(m: M, v: T)
where T : Eq + Debug,
M : MyTrait<T>
{
assert_eq!(m.get(), v);
}
pub fn main() {
test_eq(22_usize, 0_usize);
let value = MyType { dummy: 256 + 22 };
test_eq(value, value.dummy);
} | use std::default::Default; | random_line_split |
multidispatch2.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Debug;
use std::default::Default;
trait MyTrait<T> {
fn get(&self) -> T;
}
impl<T> MyTrait<T> for T
where T : Default
{
fn get(&self) -> T {
Default::default()
}
}
#[derive(Copy)]
struct MyType {
dummy: uint
}
impl MyTrait<uint> for MyType {
fn get(&self) -> uint { self.dummy }
}
fn | <T,M>(m: M, v: T)
where T : Eq + Debug,
M : MyTrait<T>
{
assert_eq!(m.get(), v);
}
pub fn main() {
test_eq(22_usize, 0_usize);
let value = MyType { dummy: 256 + 22 };
test_eq(value, value.dummy);
}
| test_eq | identifier_name |
color_button.rs | // Copyright 2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ColorButton;
use Widget;
use ffi;
use gdk;
use glib::object::Downcast;
use glib::translate::*;
use std::mem;
impl ColorButton {
pub fn new_with_color(color: &gdk::Color) -> ColorButton |
pub fn new_with_rgba(rgba: &gdk::RGBA) -> ColorButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_rgba(rgba)).downcast_unchecked()
}
}
pub fn get_color(&self) -> gdk::Color {
unsafe {
let mut color = mem::uninitialized();
ffi::gtk_color_button_get_color(self.to_glib_none().0, &mut color);
color
}
}
pub fn get_rgba(&self) -> gdk::RGBA {
unsafe {
let mut rgba = mem::uninitialized();
ffi::gtk_color_button_get_rgba(self.to_glib_none().0, &mut rgba);
rgba
}
}
pub fn set_color(&self, color: &gdk::Color) {
unsafe { ffi::gtk_color_button_set_color(self.to_glib_none().0, color) }
}
pub fn set_rgba(&self, rgba: &gdk::RGBA) {
unsafe { ffi::gtk_color_button_set_rgba(self.to_glib_none().0, rgba) }
}
}
| {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_color(color)).downcast_unchecked()
}
} | identifier_body |
color_button.rs | // Copyright 2016, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ColorButton;
use Widget;
use ffi;
use gdk;
use glib::object::Downcast;
use glib::translate::*;
use std::mem;
impl ColorButton {
pub fn new_with_color(color: &gdk::Color) -> ColorButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_color(color)).downcast_unchecked()
}
}
pub fn | (rgba: &gdk::RGBA) -> ColorButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_rgba(rgba)).downcast_unchecked()
}
}
pub fn get_color(&self) -> gdk::Color {
unsafe {
let mut color = mem::uninitialized();
ffi::gtk_color_button_get_color(self.to_glib_none().0, &mut color);
color
}
}
pub fn get_rgba(&self) -> gdk::RGBA {
unsafe {
let mut rgba = mem::uninitialized();
ffi::gtk_color_button_get_rgba(self.to_glib_none().0, &mut rgba);
rgba
}
}
pub fn set_color(&self, color: &gdk::Color) {
unsafe { ffi::gtk_color_button_set_color(self.to_glib_none().0, color) }
}
pub fn set_rgba(&self, rgba: &gdk::RGBA) {
unsafe { ffi::gtk_color_button_set_rgba(self.to_glib_none().0, rgba) }
}
}
| new_with_rgba | identifier_name |
color_button.rs | // Copyright 2016, The Gtk-rs Project Developers. | // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use ColorButton;
use Widget;
use ffi;
use gdk;
use glib::object::Downcast;
use glib::translate::*;
use std::mem;
impl ColorButton {
pub fn new_with_color(color: &gdk::Color) -> ColorButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_color(color)).downcast_unchecked()
}
}
pub fn new_with_rgba(rgba: &gdk::RGBA) -> ColorButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_color_button_new_with_rgba(rgba)).downcast_unchecked()
}
}
pub fn get_color(&self) -> gdk::Color {
unsafe {
let mut color = mem::uninitialized();
ffi::gtk_color_button_get_color(self.to_glib_none().0, &mut color);
color
}
}
pub fn get_rgba(&self) -> gdk::RGBA {
unsafe {
let mut rgba = mem::uninitialized();
ffi::gtk_color_button_get_rgba(self.to_glib_none().0, &mut rgba);
rgba
}
}
pub fn set_color(&self, color: &gdk::Color) {
unsafe { ffi::gtk_color_button_set_color(self.to_glib_none().0, color) }
}
pub fn set_rgba(&self, rgba: &gdk::RGBA) {
unsafe { ffi::gtk_color_button_set_rgba(self.to_glib_none().0, rgba) }
}
} | // See the COPYRIGHT file at the top-level directory of this distribution. | random_line_split |
net.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io;
use libc::consts::os::extra::INVALID_SOCKET;
use libc::{self, c_int, c_void};
use mem;
use net::SocketAddr;
#[allow(deprecated)]
use num::{SignedInt, Int};
use rt;
use sync::{Once, ONCE_INIT};
use sys::c;
use sys_common::{AsInner, FromInner};
pub type wrlen_t = i32;
pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
assert_eq!(ret, 0);
let _ = rt::at_exit(|| { c::WSACleanup(); });
});
}
/// Returns the last error from the Windows socket interface.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
}
/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
/// and if so, returns the last error from the Windows socket interface. This
/// function must be called before another call to the socket API is made.
///
/// FIXME: generics needed?
#[allow(deprecated)]
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(last_error())
} else {
Ok(t)
}
}
/// Provides the functionality of `cvt` for the return values of `getaddrinfo`
/// and similar, where a return value of 0 indicates success and any other value is passed through `cvt`.
pub fn cvt_gai(err: c_int) -> io::Result<()> {
if err == 0 { return Ok(()) }
cvt(err).map(|_| ())
}
/// Provides the functionality of `cvt` for a closure.
#[allow(deprecated)]
pub fn cvt_r<T: SignedInt, F>(mut f: F) -> io::Result<T> where F: FnMut() -> T {
cvt(f())
}
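// Illustrative sketch, not part of the original file: `cvt` is meant to wrap a
// raw winsock call that signals failure with -1 (SOCKET_ERROR); every other
// value passes through unchanged, and the error payload comes from
// `WSAGetLastError()` via `last_error()`.
#[allow(dead_code)]
fn cvt_sketch() {
    assert_eq!(cvt(7i32).ok(), Some(7));
    assert!(cvt(-1i32).is_err());
}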
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> |
pub fn accept(&self, storage: *mut libc::sockaddr,
len: *mut libc::socklen_t) -> io::Result<Socket> {
match unsafe { libc::accept(self.0, storage, len) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn duplicate(&self) -> io::Result<Socket> {
unsafe {
let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
try!(cvt(c::WSADuplicateSocketW(self.0,
c::GetCurrentProcessId(),
&mut info)));
match c::WSASocketW(info.iAddressFamily,
info.iSocketType,
info.iProtocol,
&mut info, 0, 0) {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
unsafe {
match libc::recv(self.0, buf.as_mut_ptr() as *mut c_void,
buf.len() as i32, 0) {
-1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0),
-1 => Err(last_error()),
n => Ok(n as usize)
}
}
}
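// Illustrative sketch, not part of the original file: because an orderly
// shutdown surfaces as Ok(0), callers can use the usual read-until-EOF loop;
// `consume` below is a hypothetical stand-in for whatever handles the bytes.
//
//     let mut buf = [0u8; 4096];
//     loop {
//         match try!(socket.read(&mut buf)) {
//             0 => break,              // peer shut down / EOF
//             n => consume(&buf[..n]),
//         }
//     }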
}
impl Drop for Socket {
fn drop(&mut self) {
let _ = unsafe { libc::closesocket(self.0) };
}
}
impl AsInner<libc::SOCKET> for Socket {
fn as_inner(&self) -> &libc::SOCKET { &self.0 }
}
impl FromInner<libc::SOCKET> for Socket {
fn from_inner(sock: libc::SOCKET) -> Socket { Socket(sock) }
}
| {
let fam = match *addr {
SocketAddr::V4(..) => libc::AF_INET,
SocketAddr::V6(..) => libc::AF_INET6,
};
match unsafe { libc::socket(fam, ty, 0) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
} | identifier_body |
net.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io;
use libc::consts::os::extra::INVALID_SOCKET;
use libc::{self, c_int, c_void};
use mem;
use net::SocketAddr;
#[allow(deprecated)]
use num::{SignedInt, Int};
use rt;
use sync::{Once, ONCE_INIT};
use sys::c;
use sys_common::{AsInner, FromInner};
pub type wrlen_t = i32;
pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
assert_eq!(ret, 0);
let _ = rt::at_exit(|| { c::WSACleanup(); });
});
}
/// Returns the last error from the Windows socket interface.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
}
/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
/// and if so, returns the last error from the Windows socket interface. This
/// function must be called before another call to the socket API is made.
///
/// FIXME: generics needed?
#[allow(deprecated)]
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(last_error())
} else {
Ok(t)
}
}
/// Provides the functionality of `cvt` for the return values of `getaddrinfo`
/// and similar, where a return value of 0 indicates success and any other value is passed through `cvt`.
pub fn cvt_gai(err: c_int) -> io::Result<()> {
if err == 0 { return Ok(()) }
cvt(err).map(|_| ())
}
/// Provides the functionality of `cvt` for a closure.
#[allow(deprecated)]
pub fn cvt_r<T: SignedInt, F>(mut f: F) -> io::Result<T> where F: FnMut() -> T { |
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => libc::AF_INET,
SocketAddr::V6(..) => libc::AF_INET6,
};
match unsafe { libc::socket(fam, ty, 0) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn accept(&self, storage: *mut libc::sockaddr,
len: *mut libc::socklen_t) -> io::Result<Socket> {
match unsafe { libc::accept(self.0, storage, len) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn duplicate(&self) -> io::Result<Socket> {
unsafe {
let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
try!(cvt(c::WSADuplicateSocketW(self.0,
c::GetCurrentProcessId(),
&mut info)));
match c::WSASocketW(info.iAddressFamily,
info.iSocketType,
info.iProtocol,
&mut info, 0, 0) {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
unsafe {
match libc::recv(self.0, buf.as_mut_ptr() as *mut c_void,
buf.len() as i32, 0) {
-1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0),
-1 => Err(last_error()),
n => Ok(n as usize)
}
}
}
}
impl Drop for Socket {
fn drop(&mut self) {
let _ = unsafe { libc::closesocket(self.0) };
}
}
impl AsInner<libc::SOCKET> for Socket {
fn as_inner(&self) -> &libc::SOCKET { &self.0 }
}
impl FromInner<libc::SOCKET> for Socket {
fn from_inner(sock: libc::SOCKET) -> Socket { Socket(sock) }
} | cvt(f())
} | random_line_split |
net.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io;
use libc::consts::os::extra::INVALID_SOCKET;
use libc::{self, c_int, c_void};
use mem;
use net::SocketAddr;
#[allow(deprecated)]
use num::{SignedInt, Int};
use rt;
use sync::{Once, ONCE_INIT};
use sys::c;
use sys_common::{AsInner, FromInner};
pub type wrlen_t = i32;
pub struct Socket(libc::SOCKET);
/// Checks whether the Windows socket interface has been started already, and
/// if not, starts it.
pub fn init() {
static START: Once = ONCE_INIT;
START.call_once(|| unsafe {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
assert_eq!(ret, 0);
let _ = rt::at_exit(|| { c::WSACleanup(); });
});
}
/// Returns the last error from the Windows socket interface.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { c::WSAGetLastError() })
}
/// Checks if the signed integer is the Windows constant `SOCKET_ERROR` (-1)
/// and if so, returns the last error from the Windows socket interface. This
/// function must be called before another call to the socket API is made.
///
/// FIXME: generics needed?
#[allow(deprecated)]
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(last_error())
} else {
Ok(t)
}
}
/// Provides the functionality of `cvt` for the return values of `getaddrinfo`
/// and similar, where a return value of 0 indicates success and any other value is passed through `cvt`.
pub fn | (err: c_int) -> io::Result<()> {
if err == 0 { return Ok(()) }
cvt(err).map(|_| ())
}
/// Provides the functionality of `cvt` for a closure.
#[allow(deprecated)]
pub fn cvt_r<T: SignedInt, F>(mut f: F) -> io::Result<T> where F: FnMut() -> T {
cvt(f())
}
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => libc::AF_INET,
SocketAddr::V6(..) => libc::AF_INET6,
};
match unsafe { libc::socket(fam, ty, 0) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn accept(&self, storage: *mut libc::sockaddr,
len: *mut libc::socklen_t) -> io::Result<Socket> {
match unsafe { libc::accept(self.0, storage, len) } {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
pub fn duplicate(&self) -> io::Result<Socket> {
unsafe {
let mut info: c::WSAPROTOCOL_INFO = mem::zeroed();
try!(cvt(c::WSADuplicateSocketW(self.0,
c::GetCurrentProcessId(),
&mut info)));
match c::WSASocketW(info.iAddressFamily,
info.iSocketType,
info.iProtocol,
&mut info, 0, 0) {
INVALID_SOCKET => Err(last_error()),
n => Ok(Socket(n)),
}
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
// On unix when a socket is shut down all further reads return 0, so we
// do the same on windows to map a shut down socket to returning EOF.
unsafe {
match libc::recv(self.0, buf.as_mut_ptr() as *mut c_void,
buf.len() as i32, 0) {
-1 if c::WSAGetLastError() == c::WSAESHUTDOWN => Ok(0),
-1 => Err(last_error()),
n => Ok(n as usize)
}
}
}
}
impl Drop for Socket {
fn drop(&mut self) {
let _ = unsafe { libc::closesocket(self.0) };
}
}
impl AsInner<libc::SOCKET> for Socket {
fn as_inner(&self) -> &libc::SOCKET { &self.0 }
}
impl FromInner<libc::SOCKET> for Socket {
fn from_inner(sock: libc::SOCKET) -> Socket { Socket(sock) }
}
| cvt_gai | identifier_name |
from_bits.rs | use itertools::Itertools;
use malachite_base::num::basic::traits::Zero;
use malachite_base::num::conversion::traits::ExactFrom;
use malachite_base::num::logic::traits::BitAccess;
use malachite_nz::natural::Natural;
pub fn from_bits_asc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let mut n = Natural::ZERO;
for i in bits.enumerate().filter_map(|(index, bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
}
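// Illustrative sketch, not part of the original file: ascending order means
// index 0 is the least significant bit, so [true, true, false] reads as
// 1 + 2 = 3, while the descending variant below reads the same slice as
// 0b110 = 6.
#[allow(dead_code)]
fn naive_bit_order_sketch() {
    let asc = from_bits_asc_naive([true, true, false].iter().cloned());
    assert_eq!(asc, Natural::from(3u32));
    let desc = from_bits_desc_naive([true, true, false].iter().cloned());
    assert_eq!(desc, Natural::from(6u32));
}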
pub fn from_bits_desc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural | {
let bits = bits.collect_vec();
let mut n = Natural::ZERO;
for i in bits.iter().rev().enumerate().filter_map(|(index, &bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
} | identifier_body |
|
from_bits.rs | use itertools::Itertools;
use malachite_base::num::basic::traits::Zero;
use malachite_base::num::conversion::traits::ExactFrom;
use malachite_base::num::logic::traits::BitAccess;
use malachite_nz::natural::Natural;
pub fn | <I: Iterator<Item = bool>>(bits: I) -> Natural {
let mut n = Natural::ZERO;
for i in bits.enumerate().filter_map(|(index, bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
}
pub fn from_bits_desc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let bits = bits.collect_vec();
let mut n = Natural::ZERO;
for i in bits.iter().rev().enumerate().filter_map(|(index, &bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
}
| from_bits_asc_naive | identifier_name |
from_bits.rs | use itertools::Itertools;
use malachite_base::num::basic::traits::Zero;
use malachite_base::num::conversion::traits::ExactFrom;
use malachite_base::num::logic::traits::BitAccess;
use malachite_nz::natural::Natural;
pub fn from_bits_asc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let mut n = Natural::ZERO;
for i in bits.enumerate().filter_map(|(index, bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
}
pub fn from_bits_desc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let bits = bits.collect_vec();
let mut n = Natural::ZERO;
for i in bits.iter().rev().enumerate().filter_map(|(index, &bit)| {
if bit | else {
None
}
}) {
n.set_bit(i);
}
n
}
| {
Some(u64::exact_from(index))
} | conditional_block |
from_bits.rs | use itertools::Itertools;
use malachite_base::num::basic::traits::Zero;
use malachite_base::num::conversion::traits::ExactFrom;
use malachite_base::num::logic::traits::BitAccess;
use malachite_nz::natural::Natural; | if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
}
pub fn from_bits_desc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let bits = bits.collect_vec();
let mut n = Natural::ZERO;
for i in bits.iter().rev().enumerate().filter_map(|(index, &bit)| {
if bit {
Some(u64::exact_from(index))
} else {
None
}
}) {
n.set_bit(i);
}
n
} |
pub fn from_bits_asc_naive<I: Iterator<Item = bool>>(bits: I) -> Natural {
let mut n = Natural::ZERO;
for i in bits.enumerate().filter_map(|(index, bit)| { | random_line_split |
integer-overflow.rs | // Copyright 2015 Keegan McAllister.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// See `LICENSE` in this repository.
#![feature(plugin, raw)]
#![plugin(afl_coverage_plugin)]
// Integer overflow bug.
// Loosely based on:
// https://github.com/sandstorm-io/capnproto/blob/master/security-advisories/2015-03-02-0-c%2B%2B-integer-overflow.md
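// Illustrative note, not part of the original file: the overflow being
// exercised is the 32-bit `wrapping_mul` in `main` below. For example,
// element_count = 0x2000_0000 with bytes_per_element = 8 multiplies to 2^32,
// which wraps to 0 and passes the 1 MB assert, while the slice handed to
// `read()` is built from the full 64-bit product (4 GiB) -- far larger than
// the Vec's actual capacity.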
extern crate afl_coverage;
extern crate byteorder;
use std::{mem, io, raw}; | use byteorder::{ReadBytesExt, LittleEndian, Error};
fn main() {
let mut stdin = io::stdin();
// First, the element size.
let bytes_per_element = stdin.read_u32::<LittleEndian>().unwrap();
loop {
let element_count = match stdin.read_u32::<LittleEndian>() {
Err(Error::UnexpectedEOF) => break,
Err(e) => panic!(e),
Ok(n) => n,
};
let total_size = element_count.wrapping_mul(bytes_per_element);
assert!(total_size <= (1 << 20)); // 1MB limit
let total_size = total_size as usize;
let mut buf: Vec<u8> = Vec::with_capacity(total_size);
let dest: &mut [u8] = unsafe {
mem::transmute(raw::Slice {
data: buf.as_ptr(),
len: (element_count as usize) * (bytes_per_element as usize),
})
};
match stdin.by_ref().read(dest) {
Ok(n) if n == total_size => {
unsafe {
buf.set_len(n);
println!("full read: {:?}", buf);
}
}
Ok(n) => println!("partial read: got {}, expected {}", n, total_size),
Err(_) => println!("error!"),
}
}
} | use std::io::Read; | random_line_split |
integer-overflow.rs | // Copyright 2015 Keegan McAllister.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// See `LICENSE` in this repository.
#![feature(plugin, raw)]
#![plugin(afl_coverage_plugin)]
// Integer overflow bug.
// Loosely based on:
// https://github.com/sandstorm-io/capnproto/blob/master/security-advisories/2015-03-02-0-c%2B%2B-integer-overflow.md
extern crate afl_coverage;
extern crate byteorder;
use std::{mem, io, raw};
use std::io::Read;
use byteorder::{ReadBytesExt, LittleEndian, Error};
fn | () {
let mut stdin = io::stdin();
// First, the element size.
let bytes_per_element = stdin.read_u32::<LittleEndian>().unwrap();
loop {
let element_count = match stdin.read_u32::<LittleEndian>() {
Err(Error::UnexpectedEOF) => break,
Err(e) => panic!(e),
Ok(n) => n,
};
let total_size = element_count.wrapping_mul(bytes_per_element);
assert!(total_size <= (1 << 20)); // 1MB limit
let total_size = total_size as usize;
let mut buf: Vec<u8> = Vec::with_capacity(total_size);
let dest: &mut [u8] = unsafe {
mem::transmute(raw::Slice {
data: buf.as_ptr(),
len: (element_count as usize) * (bytes_per_element as usize),
})
};
match stdin.by_ref().read(dest) {
Ok(n) if n == total_size => {
unsafe {
buf.set_len(n);
println!("full read: {:?}", buf);
}
}
Ok(n) => println!("partial read: got {}, expected {}", n, total_size),
Err(_) => println!("error!"),
}
}
}
| main | identifier_name |
integer-overflow.rs | // Copyright 2015 Keegan McAllister.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// See `LICENSE` in this repository.
#![feature(plugin, raw)]
#![plugin(afl_coverage_plugin)]
// Integer overflow bug.
// Loosely based on:
// https://github.com/sandstorm-io/capnproto/blob/master/security-advisories/2015-03-02-0-c%2B%2B-integer-overflow.md
extern crate afl_coverage;
extern crate byteorder;
use std::{mem, io, raw};
use std::io::Read;
use byteorder::{ReadBytesExt, LittleEndian, Error};
fn main() | mem::transmute(raw::Slice {
data: buf.as_ptr(),
len: (element_count as usize) * (bytes_per_element as usize),
})
};
match stdin.by_ref().read(dest) {
Ok(n) if n == total_size => {
unsafe {
buf.set_len(n);
println!("full read: {:?}", buf);
}
}
Ok(n) => println!("partial read: got {}, expected {}", n, total_size),
Err(_) => println!("error!"),
}
}
}
| {
let mut stdin = io::stdin();
// First, the element size.
let bytes_per_element = stdin.read_u32::<LittleEndian>().unwrap();
loop {
let element_count = match stdin.read_u32::<LittleEndian>() {
Err(Error::UnexpectedEOF) => break,
Err(e) => panic!(e),
Ok(n) => n,
};
let total_size = element_count.wrapping_mul(bytes_per_element);
assert!(total_size <= (1 << 20)); // 1MB limit
let total_size = total_size as usize;
let mut buf: Vec<u8> = Vec::with_capacity(total_size);
let dest: &mut [u8] = unsafe { | identifier_body |
line.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::stdout;
use std::io::Write;
use lister::Lister;
use error::Result;
use error::ResultExt;
use libimagstore::store::FileLockEntry;
pub struct LineLister<'a> {
unknown_output: &'a str,
}
impl<'a> LineLister<'a> {
pub fn new(unknown_output: &'a str) -> LineLister<'a> {
LineLister {
unknown_output: unknown_output,
}
}
}
impl<'a> Lister for LineLister<'a> {
fn list<'b, I: Iterator<Item = FileLockEntry<'b>>>(&self, entries: I) -> Result<()> |
}
| {
use error::ListErrorKind as LEK;
for entry in entries {
let s = entry.get_location().to_str().unwrap_or(String::from(self.unknown_output));
write!(stdout(), "{:?}\n", s).chain_err(|| LEK::FormatError)?
}
Ok(())
} | identifier_body |
line.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::stdout;
use std::io::Write;
use lister::Lister;
use error::Result;
use error::ResultExt;
use libimagstore::store::FileLockEntry;
pub struct | <'a> {
unknown_output: &'a str,
}
impl<'a> LineLister<'a> {
pub fn new(unknown_output: &'a str) -> LineLister<'a> {
LineLister {
unknown_output: unknown_output,
}
}
}
impl<'a> Lister for LineLister<'a> {
fn list<'b, I: Iterator<Item = FileLockEntry<'b>>>(&self, entries: I) -> Result<()> {
use error::ListErrorKind as LEK;
for entry in entries {
let s = entry.get_location().to_str().unwrap_or(String::from(self.unknown_output));
write!(stdout(), "{:?}\n", s).chain_err(|| LEK::FormatError)?
}
Ok(())
}
}
| LineLister | identifier_name |
line.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::io::stdout;
use std::io::Write;
use lister::Lister;
use error::Result;
use error::ResultExt;
use libimagstore::store::FileLockEntry; | unknown_output: &'a str,
}
impl<'a> LineLister<'a> {
pub fn new(unknown_output: &'a str) -> LineLister<'a> {
LineLister {
unknown_output: unknown_output,
}
}
}
impl<'a> Lister for LineLister<'a> {
fn list<'b, I: Iterator<Item = FileLockEntry<'b>>>(&self, entries: I) -> Result<()> {
use error::ListErrorKind as LEK;
for entry in entries {
let s = entry.get_location().to_str().unwrap_or(String::from(self.unknown_output));
write!(stdout(), "{:?}\n", s).chain_err(|| LEK::FormatError)?
}
Ok(())
}
} |
pub struct LineLister<'a> { | random_line_split |
tir.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use super::{PrimExpr, PrimExprNode};
use crate::ir::span::Span;
use crate::runtime::{IsObjectRef, String as TVMString};
use crate::DataType;
use tvm_macros::Object;
macro_rules! define_node {
($name:ident, $ref:expr, $typekey:expr; $node:ident { $($id:ident : $t:ty),*}) => {
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = $ref]
#[type_key = $typekey]
pub struct $node {
base: PrimExprNode,
$(pub $id : $t),*
}
impl $name {
pub fn new(datatype: DataType, $($id : $t,)*) -> $name {
let base = PrimExprNode::base::<$node>(datatype, Span::null());
let node = $node { base, $($id),* };
node.into()
}
}
}
}
// TODO(@jroesch): should move up to expr.rs to mirror TVM.
define_node!(IntImm, "IntImm", "IntImm";
IntImmNode { value: i64 });
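// Illustrative sketch, not part of the original file: every `define_node!`
// invocation generates a constructor that takes the result dtype first and then
// the declared fields, so together with the `From<i32>` impls below, small
// expression trees can be built straight from integer literals.
#[allow(dead_code)]
fn generated_constructor_sketch() {
    let a: PrimExpr = 2.into();
    let b: PrimExpr = 3.into();
    let _sum = Add::new(DataType::int(32, 1), a, b);
}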
impl From<i32> for IntImm {
fn from(i: i32) -> IntImm {
IntImm::new(DataType::int(32, 1), i as i64)
}
}
impl From<i32> for PrimExpr {
fn | (i: i32) -> PrimExpr {
IntImm::from(i).upcast()
}
}
define_node!(Var, "Var", "tir.Var";
VarNode { name_hint: TVMString });
define_node!(Add, "Add", "tir.Add"; AddNode { a: PrimExpr, b: PrimExpr });
define_node!(Sub, "Sub", "tir.Sub"; SubNode { a: PrimExpr, b: PrimExpr });
define_node!(Mul, "Mul", "tir.Mul"; MulNode { a: PrimExpr, b: PrimExpr });
define_node!(Div, "Div", "tir.Div"; DivNode { a: PrimExpr, b: PrimExpr });
define_node!(Mod, "Mod", "tir.Mod"; ModNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorDiv, "FloorDiv", "tir.FloorDiv"; FloorDivNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorMod, "FloorMod", "tir.FloorMod"; FloorModNode { a: PrimExpr, b: PrimExpr });
define_node!(Min, "Min", "tir.Min"; MinNode { a: PrimExpr, b: PrimExpr });
define_node!(Max, "Max", "tir.Max"; MaxNode { a: PrimExpr, b: PrimExpr });
// the new datatype is in the base expr
define_node!(Cast, "Cast", "tir.Cast"; CastNode { value: PrimExpr });
// renamed base to start to avoid name clash
define_node!(Ramp, "Ramp", "tir.Ramp"; RampNode { start: PrimExpr, stride: PrimExpr, lanes: i32 });
define_node!(Select, "Select", "tir.Select";
SelectNode { condition: PrimExpr, true_value: PrimExpr, false_value: PrimExpr });
define_node!(Eq, "Eq", "tir.EQ"; EqNode { a: PrimExpr, b: PrimExpr });
define_node!(Ne, "Ne", "tir.NE"; NeNode { a: PrimExpr, b: PrimExpr });
define_node!(Lt, "Lt", "tir.LT"; LtNode { a: PrimExpr, b: PrimExpr });
define_node!(Le, "Le", "tir.LE"; LeNode { a: PrimExpr, b: PrimExpr });
define_node!(Gt, "Gt", "tir.GT"; GtNode { a: PrimExpr, b: PrimExpr });
define_node!(Ge, "Ge", "tir.GE"; GeNode { a: PrimExpr, b: PrimExpr });
define_node!(And, "And", "tir.And"; AndNode { a: PrimExpr, b: PrimExpr });
define_node!(Or, "Or", "tir.Or"; OrNode { a: PrimExpr, b: PrimExpr });
define_node!(Not, "Not", "tir.Not"; NotNode { value: PrimExpr });
define_node!(Let, "Let", "tir.Let"; LetNode { var: Var, value: PrimExpr, body: PrimExpr });
| from | identifier_name |
tir.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use super::{PrimExpr, PrimExprNode};
use crate::ir::span::Span;
use crate::runtime::{IsObjectRef, String as TVMString}; | use crate::DataType;
use tvm_macros::Object;
macro_rules! define_node {
($name:ident, $ref:expr, $typekey:expr; $node:ident { $($id:ident : $t:ty),*}) => {
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = $ref]
#[type_key = $typekey]
pub struct $node {
base: PrimExprNode,
$(pub $id : $t),*
}
impl $name {
pub fn new(datatype: DataType, $($id : $t,)*) -> $name {
let base = PrimExprNode::base::<$node>(datatype, Span::null());
let node = $node { base, $($id),* };
node.into()
}
}
}
}
// TODO(@jroesch): should move up to expr.rs to mirror TVM.
define_node!(IntImm, "IntImm", "IntImm";
IntImmNode { value: i64 });
impl From<i32> for IntImm {
fn from(i: i32) -> IntImm {
IntImm::new(DataType::int(32, 1), i as i64)
}
}
impl From<i32> for PrimExpr {
fn from(i: i32) -> PrimExpr {
IntImm::from(i).upcast()
}
}
define_node!(Var, "Var", "tir.Var";
VarNode { name_hint: TVMString });
define_node!(Add, "Add", "tir.Add"; AddNode { a: PrimExpr, b: PrimExpr });
define_node!(Sub, "Sub", "tir.Sub"; SubNode { a: PrimExpr, b: PrimExpr });
define_node!(Mul, "Mul", "tir.Mul"; MulNode { a: PrimExpr, b: PrimExpr });
define_node!(Div, "Div", "tir.Div"; DivNode { a: PrimExpr, b: PrimExpr });
define_node!(Mod, "Mod", "tir.Mod"; ModNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorDiv, "FloorDiv", "tir.FloorDiv"; FloorDivNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorMod, "FloorMod", "tir.FloorMod"; FloorModNode { a: PrimExpr, b: PrimExpr });
define_node!(Min, "Min", "tir.Min"; MinNode { a: PrimExpr, b: PrimExpr });
define_node!(Max, "Max", "tir.Max"; MaxNode { a: PrimExpr, b: PrimExpr });
// the new datatype is in the base expr
define_node!(Cast, "Cast", "tir.Cast"; CastNode { value: PrimExpr });
// renamed base to start to avoid name clash
define_node!(Ramp, "Ramp", "tir.Ramp"; RampNode { start: PrimExpr, stride: PrimExpr, lanes: i32 });
define_node!(Select, "Select", "tir.Select";
SelectNode { condition: PrimExpr, true_value: PrimExpr, false_value: PrimExpr });
define_node!(Eq, "Eq", "tir.EQ"; EqNode { a: PrimExpr, b: PrimExpr });
define_node!(Ne, "Ne", "tir.NE"; NeNode { a: PrimExpr, b: PrimExpr });
define_node!(Lt, "Lt", "tir.LT"; LtNode { a: PrimExpr, b: PrimExpr });
define_node!(Le, "Le", "tir.LE"; LeNode { a: PrimExpr, b: PrimExpr });
define_node!(Gt, "Gt", "tir.GT"; GtNode { a: PrimExpr, b: PrimExpr });
define_node!(Ge, "Ge", "tir.GE"; GeNode { a: PrimExpr, b: PrimExpr });
define_node!(And, "And", "tir.And"; AndNode { a: PrimExpr, b: PrimExpr });
define_node!(Or, "Or", "tir.Or"; OrNode { a: PrimExpr, b: PrimExpr });
define_node!(Not, "Not", "tir.Not"; NotNode { value: PrimExpr });
define_node!(Let, "Let", "tir.Let"; LetNode { var: Var, value: PrimExpr, body: PrimExpr }); | random_line_split |
|
tir.rs | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
use super::{PrimExpr, PrimExprNode};
use crate::ir::span::Span;
use crate::runtime::{IsObjectRef, String as TVMString};
use crate::DataType;
use tvm_macros::Object;
macro_rules! define_node {
($name:ident, $ref:expr, $typekey:expr; $node:ident { $($id:ident : $t:ty),*}) => {
#[repr(C)]
#[derive(Object, Debug)]
#[ref_name = $ref]
#[type_key = $typekey]
pub struct $node {
base: PrimExprNode,
$(pub $id : $t),*
}
impl $name {
pub fn new(datatype: DataType, $($id : $t,)*) -> $name {
let base = PrimExprNode::base::<$node>(datatype, Span::null());
let node = $node { base, $($id),* };
node.into()
}
}
}
}
// TODO(@jroesch): should move up to expr.rs to mirror TVM.
define_node!(IntImm, "IntImm", "IntImm";
IntImmNode { value: i64 });
impl From<i32> for IntImm {
fn from(i: i32) -> IntImm {
IntImm::new(DataType::int(32, 1), i as i64)
}
}
impl From<i32> for PrimExpr {
fn from(i: i32) -> PrimExpr |
}
define_node!(Var, "Var", "tir.Var";
VarNode { name_hint: TVMString });
define_node!(Add, "Add", "tir.Add"; AddNode { a: PrimExpr, b: PrimExpr });
define_node!(Sub, "Sub", "tir.Sub"; SubNode { a: PrimExpr, b: PrimExpr });
define_node!(Mul, "Mul", "tir.Mul"; MulNode { a: PrimExpr, b: PrimExpr });
define_node!(Div, "Div", "tir.Div"; DivNode { a: PrimExpr, b: PrimExpr });
define_node!(Mod, "Mod", "tir.Mod"; ModNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorDiv, "FloorDiv", "tir.FloorDiv"; FloorDivNode { a: PrimExpr, b: PrimExpr });
define_node!(FloorMod, "FloorMod", "tir.FloorMod"; FloorModNode { a: PrimExpr, b: PrimExpr });
define_node!(Min, "Min", "tir.Min"; MinNode { a: PrimExpr, b: PrimExpr });
define_node!(Max, "Max", "tir.Max"; MaxNode { a: PrimExpr, b: PrimExpr });
// the new datatype is in the base expr
define_node!(Cast, "Cast", "tir.Cast"; CastNode { value: PrimExpr });
// renamed base to start to avoid name clash
define_node!(Ramp, "Ramp", "tir.Ramp"; RampNode { start: PrimExpr, stride: PrimExpr, lanes: i32 });
define_node!(Select, "Select", "tir.Select";
SelectNode { condition: PrimExpr, true_value: PrimExpr, false_value: PrimExpr });
define_node!(Eq, "Eq", "tir.EQ"; EqNode { a: PrimExpr, b: PrimExpr });
define_node!(Ne, "Ne", "tir.NE"; NeNode { a: PrimExpr, b: PrimExpr });
define_node!(Lt, "Lt", "tir.LT"; LtNode { a: PrimExpr, b: PrimExpr });
define_node!(Le, "Le", "tir.LE"; LeNode { a: PrimExpr, b: PrimExpr });
define_node!(Gt, "Gt", "tir.GT"; GtNode { a: PrimExpr, b: PrimExpr });
define_node!(Ge, "Ge", "tir.GE"; GeNode { a: PrimExpr, b: PrimExpr });
define_node!(And, "And", "tir.And"; AndNode { a: PrimExpr, b: PrimExpr });
define_node!(Or, "Or", "tir.Or"; OrNode { a: PrimExpr, b: PrimExpr });
define_node!(Not, "Not", "tir.Not"; NotNode { value: PrimExpr });
define_node!(Let, "Let", "tir.Let"; LetNode { var: Var, value: PrimExpr, body: PrimExpr });
| {
IntImm::from(i).upcast()
} | identifier_body |
write_events.rs | use std::fmt;
use std::error::Error;
use std::ops::Range;
use raw::client_messages::{OperationResult};
use {StreamVersion, LogPosition};
/// Successful response to `Message::WriteEvents`
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WriteEventsCompleted {
/// The event number range assigned to the written events
pub event_numbers: Range<StreamVersion>,
/// Position for `$all` query for one of the written events, perhaps the first?
pub prepare_position: Option<LogPosition>,
/// These can be used to locate last written event from the `$all` stream
pub commit_position: Option<LogPosition>,
}
/// Like `OperationResult` on the wire but does not have a success value. Explains the reason for
/// failure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum WriteEventsFailure {
/// Server failed to process the request before timeout
PrepareTimeout,
/// Server timed out while awaiting commit to be processed
CommitTimeout,
/// Server timed out while awaiting a forwarded request to complete
ForwardTimeout,
/// Optimistic locking failure; stream version was not the expected one
WrongExpectedVersion,
/// Stream has been deleted
StreamDeleted,
/// No authentication provided or insufficient permissions to a stream
AccessDenied,
}
impl WriteEventsFailure {
/// Return `true` if the operation failed in a transient way that might be resolved by
/// retrying.
pub fn is_transient(&self) -> bool {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout | CommitTimeout | ForwardTimeout => true,
_ => false
}
}
}
impl From<OperationResult> for WriteEventsFailure {
fn from(or: OperationResult) -> Self {
use self::OperationResult::*;
match or {
Success => unreachable!(),
InvalidTransaction => unreachable!(),
PrepareTimeout => WriteEventsFailure::PrepareTimeout,
CommitTimeout => WriteEventsFailure::CommitTimeout,
ForwardTimeout => WriteEventsFailure::ForwardTimeout,
WrongExpectedVersion => WriteEventsFailure::WrongExpectedVersion,
StreamDeleted => WriteEventsFailure::StreamDeleted,
AccessDenied => WriteEventsFailure::AccessDenied,
}
}
}
impl Into<OperationResult> for WriteEventsFailure {
fn into(self) -> OperationResult {
use self::WriteEventsFailure::*;
match self {
PrepareTimeout => OperationResult::PrepareTimeout,
CommitTimeout => OperationResult::CommitTimeout,
ForwardTimeout => OperationResult::ForwardTimeout,
WrongExpectedVersion => OperationResult::WrongExpectedVersion,
StreamDeleted => OperationResult::StreamDeleted,
AccessDenied => OperationResult::AccessDenied
}
}
}
impl fmt::Display for WriteEventsFailure {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.description())
}
}
impl Error for WriteEventsFailure {
fn description(&self) -> &str |
}
| {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout => "Internal server timeout, should be retried",
CommitTimeout => "Internal server timeout, should be retried",
ForwardTimeout => "Server timed out while awaiting response to forwarded request, should be retried",
WrongExpectedVersion => "Stream version was not expected, optimistic locking failure",
StreamDeleted => "Stream had been deleted",
AccessDenied => "Access to stream was denied"
}
} | identifier_body |
write_events.rs | use std::fmt;
use std::error::Error;
use std::ops::Range;
use raw::client_messages::{OperationResult};
use {StreamVersion, LogPosition};
/// Successful response to `Message::WriteEvents`
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WriteEventsCompleted {
/// The event number range assigned to the written events
pub event_numbers: Range<StreamVersion>,
/// Position for `$all` query for one of the written events, perhaps the first?
pub prepare_position: Option<LogPosition>,
/// These can be used to locate last written event from the `$all` stream
pub commit_position: Option<LogPosition>,
}
/// Like `OperationResult` on the wire but does not have a success value. Explains the reason for
/// failure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum WriteEventsFailure {
/// Server failed to process the request before timeout
PrepareTimeout,
/// Server timed out while awaiting commit to be processed
CommitTimeout,
/// Server timed out while awaiting a forwarded request to complete
ForwardTimeout,
/// Optimistic locking failure; stream version was not the expected one
WrongExpectedVersion,
/// Stream has been deleted
StreamDeleted,
/// No authentication provided or insufficient permissions to a stream
AccessDenied,
}
impl WriteEventsFailure {
/// Return `true` if the operation failed in a transient way that might be resolved by
/// retrying.
pub fn is_transient(&self) -> bool {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout | CommitTimeout | ForwardTimeout => true,
_ => false
}
}
}
impl From<OperationResult> for WriteEventsFailure {
fn from(or: OperationResult) -> Self {
use self::OperationResult::*;
match or {
Success => unreachable!(),
InvalidTransaction => unreachable!(),
PrepareTimeout => WriteEventsFailure::PrepareTimeout,
CommitTimeout => WriteEventsFailure::CommitTimeout,
ForwardTimeout => WriteEventsFailure::ForwardTimeout,
WrongExpectedVersion => WriteEventsFailure::WrongExpectedVersion,
StreamDeleted => WriteEventsFailure::StreamDeleted,
AccessDenied => WriteEventsFailure::AccessDenied,
}
}
}
impl Into<OperationResult> for WriteEventsFailure {
fn into(self) -> OperationResult {
use self::WriteEventsFailure::*;
match self {
PrepareTimeout => OperationResult::PrepareTimeout,
CommitTimeout => OperationResult::CommitTimeout,
ForwardTimeout => OperationResult::ForwardTimeout,
WrongExpectedVersion => OperationResult::WrongExpectedVersion,
StreamDeleted => OperationResult::StreamDeleted,
AccessDenied => OperationResult::AccessDenied
}
}
}
impl fmt::Display for WriteEventsFailure {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.description())
}
}
impl Error for WriteEventsFailure {
fn | (&self) -> &str {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout => "Internal server timeout, should be retried",
CommitTimeout => "Internal server timeout, should be retried",
ForwardTimeout => "Server timed out while awaiting response to forwarded request, should be retried",
WrongExpectedVersion => "Stream version was not expected, optimistic locking failure",
StreamDeleted => "Stream had been deleted",
AccessDenied => "Access to stream was denied"
}
}
}
| description | identifier_name |
write_events.rs | use std::fmt;
use std::error::Error;
use std::ops::Range;
use raw::client_messages::{OperationResult};
use {StreamVersion, LogPosition};
/// Successful response to `Message::WriteEvents`
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WriteEventsCompleted {
/// The event number range assigned to the written events
pub event_numbers: Range<StreamVersion>,
/// Position for `$all` query for one of the written events, perhaps the first?
pub prepare_position: Option<LogPosition>,
/// These can be used to locate last written event from the `$all` stream
pub commit_position: Option<LogPosition>,
}
/// Like `OperationResult` on the wire but does not have a success value. Explains the reason for
/// failure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum WriteEventsFailure {
/// Server failed to process the request before timeout
PrepareTimeout,
/// Server timed out while awaiting commit to be processed
CommitTimeout,
/// Server timed out while awaiting a forwarded request to complete | /// No authentication provided or insufficient permissions to a stream
AccessDenied,
}
impl WriteEventsFailure {
/// Return `true` if the operation failed in a transient way that might be resolved by
/// retrying.
pub fn is_transient(&self) -> bool {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout | CommitTimeout | ForwardTimeout => true,
_ => false
}
}
}
impl From<OperationResult> for WriteEventsFailure {
fn from(or: OperationResult) -> Self {
use self::OperationResult::*;
match or {
Success => unreachable!(),
InvalidTransaction => unreachable!(),
PrepareTimeout => WriteEventsFailure::PrepareTimeout,
CommitTimeout => WriteEventsFailure::CommitTimeout,
ForwardTimeout => WriteEventsFailure::ForwardTimeout,
WrongExpectedVersion => WriteEventsFailure::WrongExpectedVersion,
StreamDeleted => WriteEventsFailure::StreamDeleted,
AccessDenied => WriteEventsFailure::AccessDenied,
}
}
}
impl Into<OperationResult> for WriteEventsFailure {
fn into(self) -> OperationResult {
use self::WriteEventsFailure::*;
match self {
PrepareTimeout => OperationResult::PrepareTimeout,
CommitTimeout => OperationResult::CommitTimeout,
ForwardTimeout => OperationResult::ForwardTimeout,
WrongExpectedVersion => OperationResult::WrongExpectedVersion,
StreamDeleted => OperationResult::StreamDeleted,
AccessDenied => OperationResult::AccessDenied
}
}
}
impl fmt::Display for WriteEventsFailure {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.description())
}
}
impl Error for WriteEventsFailure {
fn description(&self) -> &str {
use self::WriteEventsFailure::*;
match *self {
PrepareTimeout => "Internal server timeout, should be retried",
CommitTimeout => "Internal server timeout, should be retried",
ForwardTimeout => "Server timed out while awaiting response to forwarded request, should be retried",
WrongExpectedVersion => "Stream version was not expected, optimistic locking failure",
StreamDeleted => "Stream had been deleted",
AccessDenied => "Access to stream was denied"
}
}
} | ForwardTimeout,
/// Optimistic locking failure; stream version was not the expected one
WrongExpectedVersion,
/// Stream has been deleted
StreamDeleted, | random_line_split |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Specified color values.
use cssparser::{Color as CSSParserColor, Parser, RGBA, Token, BasicParseError};
#[cfg(feature = "gecko")]
use gecko_bindings::structs::nscolor;
use itoa;
use parser::{ParserContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::system_colors::SystemColor;
use std::fmt;
use std::io::Write;
use style_traits::{ToCss, ParseError, StyleParseError, ValueParseError};
use super::AllowQuirks;
use values::computed::{Color as ComputedColor, Context, ToComputedValue};
/// Specified color value
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
use style_traits::ToCss;
define_css_keyword_enum! { SpecialColorKeyword:
"-moz-default-color" => MozDefaultColor,
"-moz-default-background-color" => MozDefaultBackgroundColor,
"-moz-hyperlinktext" => MozHyperlinktext,
"-moz-activehyperlinktext" => MozActiveHyperlinktext,
"-moz-visitedhyperlinktext" => MozVisitedHyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
impl Parse for Color {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = match input.next() {
Ok(&Token::Ident(ref s)) => Some(s.to_lowercase().into_boxed_str()),
_ => None,
};
input.reset(&start);
match input.try(CSSParserColor::parse) {
Ok(value) =>
Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored,
},
}),
Err(e) => {
#[cfg(feature = "gecko")] {
if let Ok(system) = input.try(SystemColor::parse) {
return Ok(Color::System(system));
} else if let Ok(c) = gecko::SpecialColorKeyword::parse(input) {
return Ok(Color::Special(c));
}
}
match e {
BasicParseError::UnexpectedToken(t) =>
Err(StyleParseError::ValueError(ValueParseError::InvalidColor(t)).into()),
e => Err(e.into())
}
}
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric { authored: Some(ref authored), .. } => dest.write_str(authored),
Color::Numeric { parsed: ref rgba, .. } => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense
/// to handle a cssparser::Color here. This should really be done in
/// cssparser directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| {
match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor =>
unreachable!("parse_hash should never return currentcolor"),
}
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks)
-> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(|rgba| Color::rgba(rgba))
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let (value, unit) = match *input.next()? {
Token::Number { int_value: Some(integer), .. } => {
(integer, None)
},
Token::Dimension { int_value: Some(integer), ref unit, .. } => {
(integer, Some(unit))
},
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
return parse_hash_color(ident.as_bytes())
.map_err(|()| StyleParseError::UnspecifiedError.into());
}
ref t => {
return Err(BasicParseError::UnexpectedToken(t.clone()).into());
},
};
if value < 0 {
return Err(StyleParseError::UnspecifiedError.into());
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(StyleParseError::UnspecifiedError.into())
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..]).write(unit.as_bytes()).unwrap();
}
debug_assert!(written == 6);
parse_hash_color(&serialization).map_err(|()| StyleParseError::UnspecifiedError.into())
}
/// Returns false if the color is completely transparent, and
/// true otherwise.
pub fn is_non_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha != 0,
_ => true,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
match *self {
Color::CurrentColor => {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
ComputedColor::currentcolor()
}
Color::Numeric { ref parsed, .. } => ComputedColor::rgba(*parsed),
Color::Complex(ref complex) => {
if complex.foreground_ratio != 0 {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
}
*complex
}
#[cfg(feature = "gecko")]
Color::System(system) =>
convert_nscolor_to_computedcolor(system.to_computed_value(context)),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
let pres_context = context.device().pres_context();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => pres_context.mDefaultColor,
Keyword::MozDefaultBackgroundColor => pres_context.mBackgroundColor,
Keyword::MozHyperlinktext => pres_context.mLinkColor,
Keyword::MozActiveHyperlinktext => pres_context.mActiveLinkColor,
Keyword::MozVisitedHyperlinktext => pres_context.mVisitedLinkColor,
})
}
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
ComputedColor::rgba(context.device().body_text_color())
},
}
}
fn from_computed_value(computed: &ComputedColor) -> Self {
if computed.is_numeric() {
Color::rgba(computed.color)
} else if computed.is_currentcolor() {
Color::currentcolor()
} else {
Color::Complex(*computed)
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, PartialEq, ToCss)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor |
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
| {
RGBAColor(color)
} | identifier_body |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Specified color values.
use cssparser::{Color as CSSParserColor, Parser, RGBA, Token, BasicParseError};
#[cfg(feature = "gecko")]
use gecko_bindings::structs::nscolor;
use itoa;
use parser::{ParserContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::system_colors::SystemColor;
use std::fmt;
use std::io::Write;
use style_traits::{ToCss, ParseError, StyleParseError, ValueParseError};
use super::AllowQuirks;
use values::computed::{Color as ComputedColor, Context, ToComputedValue};
/// Specified color value
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
use style_traits::ToCss;
define_css_keyword_enum! { SpecialColorKeyword:
"-moz-default-color" => MozDefaultColor,
"-moz-default-background-color" => MozDefaultBackgroundColor,
"-moz-hyperlinktext" => MozHyperlinktext,
"-moz-activehyperlinktext" => MozActiveHyperlinktext,
"-moz-visitedhyperlinktext" => MozVisitedHyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
impl Parse for Color {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = match input.next() {
Ok(&Token::Ident(ref s)) => Some(s.to_lowercase().into_boxed_str()),
_ => None,
};
input.reset(&start);
match input.try(CSSParserColor::parse) {
Ok(value) =>
Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored,
},
}),
Err(e) => {
#[cfg(feature = "gecko")] {
if let Ok(system) = input.try(SystemColor::parse) {
return Ok(Color::System(system));
} else if let Ok(c) = gecko::SpecialColorKeyword::parse(input) {
return Ok(Color::Special(c));
}
}
match e {
BasicParseError::UnexpectedToken(t) =>
Err(StyleParseError::ValueError(ValueParseError::InvalidColor(t)).into()),
e => Err(e.into())
}
}
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric { authored: Some(ref authored), .. } => dest.write_str(authored),
Color::Numeric { parsed: ref rgba, .. } => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense
/// to handle a cssparser::Color here. This should really be done in
/// cssparser directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| {
match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor =>
unreachable!("parse_hash should never return currentcolor"),
}
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks)
-> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(|rgba| Color::rgba(rgba))
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let (value, unit) = match *input.next()? {
Token::Number { int_value: Some(integer), .. } => {
(integer, None)
},
Token::Dimension { int_value: Some(integer), ref unit, .. } => {
(integer, Some(unit))
},
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
return parse_hash_color(ident.as_bytes())
.map_err(|()| StyleParseError::UnspecifiedError.into());
}
ref t => | ,
};
if value < 0 {
return Err(StyleParseError::UnspecifiedError.into());
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(StyleParseError::UnspecifiedError.into())
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..]).write(unit.as_bytes()).unwrap();
}
debug_assert!(written == 6);
parse_hash_color(&serialization).map_err(|()| StyleParseError::UnspecifiedError.into())
}
/// Returns false if the color is completely transparent, and
/// true otherwise.
pub fn is_non_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha != 0,
_ => true,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
match *self {
Color::CurrentColor => {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
ComputedColor::currentcolor()
}
Color::Numeric { ref parsed, .. } => ComputedColor::rgba(*parsed),
Color::Complex(ref complex) => {
if complex.foreground_ratio != 0 {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
}
*complex
}
#[cfg(feature = "gecko")]
Color::System(system) =>
convert_nscolor_to_computedcolor(system.to_computed_value(context)),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
let pres_context = context.device().pres_context();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => pres_context.mDefaultColor,
Keyword::MozDefaultBackgroundColor => pres_context.mBackgroundColor,
Keyword::MozHyperlinktext => pres_context.mLinkColor,
Keyword::MozActiveHyperlinktext => pres_context.mActiveLinkColor,
Keyword::MozVisitedHyperlinktext => pres_context.mVisitedLinkColor,
})
}
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
ComputedColor::rgba(context.device().body_text_color())
},
}
}
fn from_computed_value(computed: &ComputedColor) -> Self {
if computed.is_numeric() {
Color::rgba(computed.color)
} else if computed.is_currentcolor() {
Color::currentcolor()
} else {
Color::Complex(*computed)
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, PartialEq, ToCss)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
| {
return Err(BasicParseError::UnexpectedToken(t.clone()).into());
} | conditional_block |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Specified color values.
use cssparser::{Color as CSSParserColor, Parser, RGBA, Token, BasicParseError};
#[cfg(feature = "gecko")]
use gecko_bindings::structs::nscolor;
use itoa;
use parser::{ParserContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::system_colors::SystemColor;
use std::fmt;
use std::io::Write;
use style_traits::{ToCss, ParseError, StyleParseError, ValueParseError};
use super::AllowQuirks;
use values::computed::{Color as ComputedColor, Context, ToComputedValue};
/// Specified color value
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
use style_traits::ToCss;
define_css_keyword_enum! { SpecialColorKeyword:
"-moz-default-color" => MozDefaultColor,
"-moz-default-background-color" => MozDefaultBackgroundColor,
"-moz-hyperlinktext" => MozHyperlinktext,
"-moz-activehyperlinktext" => MozActiveHyperlinktext,
"-moz-visitedhyperlinktext" => MozVisitedHyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
impl Parse for Color {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = match input.next() {
Ok(&Token::Ident(ref s)) => Some(s.to_lowercase().into_boxed_str()),
_ => None,
};
input.reset(&start);
match input.try(CSSParserColor::parse) {
Ok(value) =>
Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored,
},
}),
Err(e) => {
#[cfg(feature = "gecko")] {
if let Ok(system) = input.try(SystemColor::parse) {
return Ok(Color::System(system));
} else if let Ok(c) = gecko::SpecialColorKeyword::parse(input) {
return Ok(Color::Special(c));
}
}
match e {
BasicParseError::UnexpectedToken(t) =>
Err(StyleParseError::ValueError(ValueParseError::InvalidColor(t)).into()),
e => Err(e.into())
}
}
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric { authored: Some(ref authored), .. } => dest.write_str(authored),
Color::Numeric { parsed: ref rgba, .. } => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense
/// to handle a cssparser::Color here. This should really be done in
/// cssparser directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| {
match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor =>
unreachable!("parse_hash should never return currentcolor"),
}
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks)
-> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(|rgba| Color::rgba(rgba))
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let (value, unit) = match *input.next()? {
Token::Number { int_value: Some(integer), .. } => {
(integer, None)
},
Token::Dimension { int_value: Some(integer), ref unit, .. } => {
(integer, Some(unit))
},
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
return parse_hash_color(ident.as_bytes())
.map_err(|()| StyleParseError::UnspecifiedError.into());
}
ref t => {
return Err(BasicParseError::UnexpectedToken(t.clone()).into());
},
};
if value < 0 {
return Err(StyleParseError::UnspecifiedError.into());
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(StyleParseError::UnspecifiedError.into())
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..]).write(unit.as_bytes()).unwrap();
}
debug_assert!(written == 6);
parse_hash_color(&serialization).map_err(|()| StyleParseError::UnspecifiedError.into())
}
/// Returns false if the color is completely transparent, and
/// true otherwise.
pub fn is_non_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha != 0,
_ => true,
}
}
}
#[cfg(feature = "gecko")]
fn | (color: nscolor) -> ComputedColor {
use gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
match *self {
Color::CurrentColor => {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
ComputedColor::currentcolor()
}
Color::Numeric { ref parsed, .. } => ComputedColor::rgba(*parsed),
Color::Complex(ref complex) => {
if complex.foreground_ratio != 0 {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
}
*complex
}
#[cfg(feature = "gecko")]
Color::System(system) =>
convert_nscolor_to_computedcolor(system.to_computed_value(context)),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
let pres_context = context.device().pres_context();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => pres_context.mDefaultColor,
Keyword::MozDefaultBackgroundColor => pres_context.mBackgroundColor,
Keyword::MozHyperlinktext => pres_context.mLinkColor,
Keyword::MozActiveHyperlinktext => pres_context.mActiveLinkColor,
Keyword::MozVisitedHyperlinktext => pres_context.mVisitedLinkColor,
})
}
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
ComputedColor::rgba(context.device().body_text_color())
},
}
}
fn from_computed_value(computed: &ComputedColor) -> Self {
if computed.is_numeric() {
Color::rgba(computed.color)
} else if computed.is_currentcolor() {
Color::currentcolor()
} else {
Color::Complex(*computed)
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, PartialEq, ToCss)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
| convert_nscolor_to_computedcolor | identifier_name |
color.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Specified color values.
use cssparser::{Color as CSSParserColor, Parser, RGBA, Token, BasicParseError};
#[cfg(feature = "gecko")]
use gecko_bindings::structs::nscolor;
use itoa;
use parser::{ParserContext, Parse};
#[cfg(feature = "gecko")]
use properties::longhands::system_colors::SystemColor;
use std::fmt;
use std::io::Write;
use style_traits::{ToCss, ParseError, StyleParseError, ValueParseError};
use super::AllowQuirks;
use values::computed::{Color as ComputedColor, Context, ToComputedValue};
/// Specified color value
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
use style_traits::ToCss;
define_css_keyword_enum! { SpecialColorKeyword:
"-moz-default-color" => MozDefaultColor,
"-moz-default-background-color" => MozDefaultBackgroundColor,
"-moz-hyperlinktext" => MozHyperlinktext,
"-moz-activehyperlinktext" => MozActiveHyperlinktext,
"-moz-visitedhyperlinktext" => MozVisitedHyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
impl Parse for Color {
fn parse<'i, 't>(_: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = match input.next() {
Ok(&Token::Ident(ref s)) => Some(s.to_lowercase().into_boxed_str()),
_ => None,
};
input.reset(&start);
match input.try(CSSParserColor::parse) {
Ok(value) =>
Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored,
},
}),
Err(e) => {
#[cfg(feature = "gecko")] {
if let Ok(system) = input.try(SystemColor::parse) {
return Ok(Color::System(system));
} else if let Ok(c) = gecko::SpecialColorKeyword::parse(input) {
return Ok(Color::Special(c));
}
}
match e {
BasicParseError::UnexpectedToken(t) =>
Err(StyleParseError::ValueError(ValueParseError::InvalidColor(t)).into()),
e => Err(e.into())
}
}
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric { authored: Some(ref authored), .. } => dest.write_str(authored),
Color::Numeric { parsed: ref rgba, .. } => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense
/// to handle a cssparser::Color here. This should really be done in
/// cssparser directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| {
match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor =>
unreachable!("parse_hash should never return currentcolor"),
}
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
pub fn parse_quirky<'i, 't>(context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks)
-> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(|rgba| Color::rgba(rgba))
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let (value, unit) = match *input.next()? {
Token::Number { int_value: Some(integer), .. } => {
(integer, None)
},
Token::Dimension { int_value: Some(integer), ref unit, .. } => {
(integer, Some(unit))
},
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
return parse_hash_color(ident.as_bytes())
.map_err(|()| StyleParseError::UnspecifiedError.into());
}
ref t => {
return Err(BasicParseError::UnexpectedToken(t.clone()).into());
},
};
if value < 0 {
return Err(StyleParseError::UnspecifiedError.into());
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(StyleParseError::UnspecifiedError.into())
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(StyleParseError::UnspecifiedError.into());
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..]).write(unit.as_bytes()).unwrap();
}
debug_assert!(written == 6);
parse_hash_color(&serialization).map_err(|()| StyleParseError::UnspecifiedError.into())
}
/// Returns false if the color is completely transparent, and
/// true otherwise.
pub fn is_non_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha != 0,
_ => true,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor { | match *self {
Color::CurrentColor => {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
ComputedColor::currentcolor()
}
Color::Numeric { ref parsed, .. } => ComputedColor::rgba(*parsed),
Color::Complex(ref complex) => {
if complex.foreground_ratio != 0 {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut()
.set_uncacheable();
}
}
}
*complex
}
#[cfg(feature = "gecko")]
Color::System(system) =>
convert_nscolor_to_computedcolor(system.to_computed_value(context)),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
let pres_context = context.device().pres_context();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => pres_context.mDefaultColor,
Keyword::MozDefaultBackgroundColor => pres_context.mBackgroundColor,
Keyword::MozHyperlinktext => pres_context.mLinkColor,
Keyword::MozActiveHyperlinktext => pres_context.mActiveLinkColor,
Keyword::MozVisitedHyperlinktext => pres_context.mVisitedLinkColor,
})
}
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
ComputedColor::rgba(context.device().body_text_color())
},
}
}
fn from_computed_value(computed: &ComputedColor) -> Self {
if computed.is_numeric() {
Color::rgba(computed.color)
} else if computed.is_currentcolor() {
Color::currentcolor()
} else {
Color::Complex(*computed)
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, PartialEq, ToCss)]
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
} | random_line_split |
|
lib.rs | #[macro_use]
extern crate nom;
use std::fmt::{Debug, Formatter, Result};
mod parser;
macro_rules! pp {
($msg:expr) => {{
println!("{:?}", $msg);
}};
}
mod Warc{
use parser;
use std::collections::HashMap;
pub fn parse(data: &String) -> Vec<Record> | break;
}
},
None => {
ended = true;
break;
}
}
}
pp!(next_new_line);
pp!(ended);
let (s, rest) = data_chars.split_at(next_new_line);
pp!(s);
pp!(rest);
data_chars = rest;
if ended { break; }
}
return vec![Record::new(attr, Header::new(header), "".to_string())]
}
pub struct Record{
attributes: HashMap<String, String>,
header: Header,
content: String,
}
impl Record{
fn new(attributes: HashMap<String, String>, header: Header, content: String) -> Record {
Record{header: header, content: content, attributes: attributes}
}
}
pub struct Header{
fields: HashMap<String, String>,
}
impl Header{
fn new(fields: HashMap<String, String>) -> Header {
Header{fields: fields}
}
}
}
#[test]
fn it_debugs() {
let s= "s
s".to_string();
let warc = Warc::parse(&s);
}
| {
let mut header = HashMap::new();
let mut attr = HashMap::new();
let mut records: Vec<Record> = Vec::new();
let mut current_record: Option<Record> = None;
let mut data_chars_: Vec<char> = data.chars().collect();
let mut data_chars = &data_chars_[..];
let mut next_new_line = 0;
let mut ended = false;
let xx = &b"\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:ff728363-2d5f-4f5f-b832-9552de1a6037>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:becker>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\n"[..];
let v = parser::warc_records(xx);
pp!(v);
'outer: loop{
pp!(data_chars);
loop {
match data_chars.get(next_new_line) {
Some(n) => {
next_new_line = next_new_line+1;
if n.eq(&'\n'){ | identifier_body |
lib.rs | #[macro_use]
extern crate nom;
use std::fmt::{Debug, Formatter, Result};
mod parser;
macro_rules! pp {
($msg:expr) => {{
println!("{:?}", $msg);
}};
}
mod Warc{
use parser;
use std::collections::HashMap;
pub fn parse(data: &String) -> Vec<Record>{
let mut header = HashMap::new();
let mut attr = HashMap::new();
let mut records: Vec<Record> = Vec::new();
let mut current_record: Option<Record> = None;
let mut data_chars_: Vec<char> = data.chars().collect();
let mut data_chars = &data_chars_[..];
let mut next_new_line = 0;
let mut ended = false;
let xx = &b"\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:ff728363-2d5f-4f5f-b832-9552de1a6037>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:becker>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\n"[..];
let v = parser::warc_records(xx);
pp!(v);
'outer: loop{
pp!(data_chars);
loop {
match data_chars.get(next_new_line) {
Some(n) => {
next_new_line = next_new_line+1;
if n.eq(&'\n'){
break;
}
},
None => {
ended = true;
break;
}
}
}
pp!(next_new_line);
pp!(ended);
let (s, rest) = data_chars.split_at(next_new_line);
pp!(s); | pp!(rest);
data_chars = rest;
if ended { break; }
}
return vec![Record::new(attr, Header::new(header), "".to_string())]
}
pub struct Record{
attributes: HashMap<String, String>,
header: Header,
content: String,
}
impl Record{
fn new(attributes: HashMap<String, String>, header: Header, content: String) -> Record {
Record{header: header, content: content, attributes: attributes}
}
}
pub struct Header{
fields: HashMap<String, String>,
}
impl Header{
fn new(fields: HashMap<String, String>) -> Header {
Header{fields: fields}
}
}
}
#[test]
fn it_debugs() {
let s= "s
s".to_string();
let warc = Warc::parse(&s);
} | random_line_split |
|
lib.rs | #[macro_use]
extern crate nom;
use std::fmt::{Debug, Formatter, Result};
mod parser;
macro_rules! pp {
($msg:expr) => {{
println!("{:?}", $msg);
}};
}
mod Warc{
use parser;
use std::collections::HashMap;
pub fn parse(data: &String) -> Vec<Record>{
let mut header = HashMap::new();
let mut attr = HashMap::new();
let mut records: Vec<Record> = Vec::new();
let mut current_record: Option<Record> = None;
let mut data_chars_: Vec<char> = data.chars().collect();
let mut data_chars = &data_chars_[..];
let mut next_new_line = 0;
let mut ended = false;
let xx = &b"\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:ff728363-2d5f-4f5f-b832-9552de1a6037>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:becker>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\n"[..];
let v = parser::warc_records(xx);
pp!(v);
'outer: loop{
pp!(data_chars);
loop {
match data_chars.get(next_new_line) {
Some(n) => {
next_new_line = next_new_line+1;
if n.eq(&'\n'){
break;
}
},
None => {
ended = true;
break;
}
}
}
pp!(next_new_line);
pp!(ended);
let (s, rest) = data_chars.split_at(next_new_line);
pp!(s);
pp!(rest);
data_chars = rest;
if ended |
}
return vec![Record::new(attr, Header::new(header), "".to_string())]
}
pub struct Record{
attributes: HashMap<String, String>,
header: Header,
content: String,
}
impl Record{
fn new(attributes: HashMap<String, String>, header: Header, content: String) -> Record {
Record{header: header, content: content, attributes: attributes}
}
}
pub struct Header{
fields: HashMap<String, String>,
}
impl Header{
fn new(fields: HashMap<String, String>) -> Header {
Header{fields: fields}
}
}
}
#[test]
fn it_debugs() {
let s= "s
s".to_string();
let warc = Warc::parse(&s);
}
| { break;} | conditional_block |
lib.rs | #[macro_use]
extern crate nom;
use std::fmt::{Debug, Formatter, Result};
mod parser;
macro_rules! pp {
($msg:expr) => {{
println!("{:?}", $msg);
}};
}
mod Warc{
use parser;
use std::collections::HashMap;
pub fn parse(data: &String) -> Vec<Record>{
let mut header = HashMap::new();
let mut attr = HashMap::new();
let mut records: Vec<Record> = Vec::new();
let mut current_record: Option<Record> = None;
let mut data_chars_: Vec<char> = data.chars().collect();
let mut data_chars = &data_chars_[..];
let mut next_new_line = 0;
let mut ended = false;
let xx = &b"\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:ff728363-2d5f-4f5f-b832-9552de1a6037>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\nWARC/0.17\nWARC-Type: response\nWARC-Target-URI: dns:www.archive.org\nWARC-Date: 2008-04-30T20:48:25Z\nWARC-IP-Address: 68.87.76.178\nWARC-Record-ID: <urn:uuid:becker>\nContent-Type: text/dns\nContent-Length: 56\n\n20080430204825\nwww.archive.org. 589 IN A 207.241.229.39\n\n"[..];
let v = parser::warc_records(xx);
pp!(v);
'outer: loop{
pp!(data_chars);
loop {
match data_chars.get(next_new_line) {
Some(n) => {
next_new_line = next_new_line+1;
if n.eq(&'\n'){
break;
}
},
None => {
ended = true;
break;
}
}
}
pp!(next_new_line);
pp!(ended);
let (s, rest) =data_chars.split_at(next_new_line);
pp!(s);
pp!(rest);
data_chars = rest;
if ended{ break;}
}
return vec![Record::new(attr, Header::new(header), "".to_string())]
}
pub struct Record{
attributes: HashMap<String, String>,
header: Header,
content: String,
}
impl Record{
fn new(attributes: HashMap<String, String>, header: Header, content: String) -> Record {
Record{header: header, content: content, attributes: attributes}
}
}
pub struct Header{
fields: HashMap<String, String>,
}
impl Header{
fn new(fields: HashMap<String, String>) -> Header {
Header{fields: fields}
}
}
}
#[test]
fn | () {
let s= "s
s".to_string();
let warc = Warc::parse(&s);
}
| it_debugs | identifier_name |
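As a side note to the `lib.rs` rows above (not taken from them): the `parse` function finds record boundaries by walking a `Vec<char>` one index at a time until it hits `'\n'`. The same scanning step can be sketched over `&str` with the standard library; `records_of` is a made-up helper name used only for illustration.

```rust
// Minimal sketch: split a WARC-like blob into newline-terminated chunks,
// mirroring what the manual `next_new_line` loop above computes.
fn records_of(data: &str) -> Vec<&str> {
    // split_inclusive keeps the trailing '\n' on each chunk, much like the
    // `split_at(next_new_line)` call in the original loop.
    data.split_inclusive('\n').collect()
}

fn main() {
    let blob = "WARC/0.17\nWARC-Type: response\n\n20080430204825\n";
    for chunk in records_of(blob) {
        println!("{:?}", chunk);
    }
}
```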
aarch64.rs | pub type c_long = i64;
pub type c_ulong = u64;
pub type c_char = u8;
pub type ucontext_t = sigcontext;
s! {
pub struct sigcontext {
__sc_unused: ::c_int,
pub sc_mask: ::c_int,
pub sc_sp: ::c_ulong,
pub sc_lr: ::c_ulong,
pub sc_elr: ::c_ulong,
pub sc_spsr: ::c_ulong,
pub sc_x: [::c_ulong; 30],
pub sc_cookie: ::c_long,
}
}
// should be pub(crate), but that requires Rust 1.18.0
cfg_if! {
if #[cfg(libc_const_size_of)] {
#[doc(hidden)]
pub const _ALIGNBYTES: usize = ::mem::size_of::<::c_long>() - 1;
} else {
#[doc(hidden)] | pub const _MAX_PAGE_SHIFT: u32 = 12; | pub const _ALIGNBYTES: usize = 8 - 1;
}
}
| random_line_split |
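`_ALIGNBYTES` in the row above is the mask-style constant (alignment of `c_long` minus one) that BSD-derived headers typically use to round lengths up to pointer alignment, for example in `CMSG_ALIGN`-like helpers. A self-contained sketch of that rounding idiom follows; the constant is redefined locally and the function name is invented, since this is an assumption about intended use rather than code from the row.

```rust
// Stand-alone illustration of ALIGN(len) = (len + ALIGNBYTES) & !ALIGNBYTES.
// On aarch64, c_long is 8 bytes, so the mask is 8 - 1.
const ALIGNBYTES: usize = std::mem::size_of::<i64>() - 1;

fn align_up(len: usize) -> usize {
    (len + ALIGNBYTES) & !ALIGNBYTES
}

fn main() {
    assert_eq!(align_up(1), 8);
    assert_eq!(align_up(8), 8);
    assert_eq!(align_up(9), 16);
    println!("alignment helper behaves as expected");
}
```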
observer.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use engine_traits::KvEngine;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::*;
use tikv_util::worker::Scheduler;
use crate::cmd::lock_only_filter;
use crate::endpoint::Task;
use crate::metrics::RTS_CHANNEL_PENDING_CMD_BYTES;
pub struct Observer<E: KvEngine> {
scheduler: Scheduler<Task<E::Snapshot>>,
}
impl<E: KvEngine> Observer<E> {
pub fn new(scheduler: Scheduler<Task<E::Snapshot>>) -> Self {
Observer { scheduler }
}
pub fn register_to(&self, coprocessor_host: &mut CoprocessorHost<E>) {
// The `resolved-ts` cmd observer will `mem::take` the `Vec<CmdBatch>`, use a low priority
// to let it be the last observer and avoid affecting other observers
coprocessor_host
.registry
.register_cmd_observer(1000, BoxCmdObserver::new(self.clone()));
coprocessor_host
.registry
.register_role_observer(100, BoxRoleObserver::new(self.clone()));
coprocessor_host
.registry
.register_region_change_observer(100, BoxRegionChangeObserver::new(self.clone()));
}
}
impl<E: KvEngine> Clone for Observer<E> {
fn clone(&self) -> Self {
Self {
scheduler: self.scheduler.clone(),
}
}
}
impl<E: KvEngine> Coprocessor for Observer<E> {}
impl<E: KvEngine> CmdObserver<E> for Observer<E> {
fn on_flush_applied_cmd_batch(
&self,
max_level: ObserveLevel,
cmd_batches: &mut Vec<CmdBatch>,
_: &E,
) {
if max_level == ObserveLevel::None {
return;
}
let cmd_batches: Vec<_> = std::mem::take(cmd_batches)
.into_iter()
.filter_map(lock_only_filter)
.collect();
if cmd_batches.is_empty() {
return;
}
let size = cmd_batches.iter().map(|b| b.size()).sum::<usize>();
RTS_CHANNEL_PENDING_CMD_BYTES.add(size as i64);
if let Err(e) = self.scheduler.schedule(Task::ChangeLog {
cmd_batch: cmd_batches,
snapshot: None,
}) {
info!("failed to schedule change log event"; "err" =>?e);
}
}
fn on_applied_current_term(&self, role: StateRole, region: &Region) {
// Start advancing resolved ts after the peer becomes leader and applies on its term
if role == StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::RegisterRegion {
region: region.clone(),
}) |
}
}
}
impl<E: KvEngine> RoleObserver for Observer<E> {
fn on_role_change(&self, ctx: &mut ObserverContext<'_>, role: StateRole) {
// Stop advancing resolved ts after the peer steps down to follower or candidate.
// There is no need to check the observe id because all role change events are expected to be scheduled in order.
if role!= StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::DeRegisterRegion {
region_id: ctx.region().id,
}) {
info!("failed to schedule deregister region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RegionChangeObserver for Observer<E> {
fn on_region_changed(
&self,
ctx: &mut ObserverContext<'_>,
event: RegionChangeEvent,
role: StateRole,
) {
// If the peer is not the leader, it must not have registered the observe region or it is deregistering
// the observe region, so there is no need to send `RegionUpdated`/`RegionDestroyed` to update the observe
// region
if role!= StateRole::Leader {
return;
}
match event {
RegionChangeEvent::Create => {}
RegionChangeEvent::Update => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionUpdated(ctx.region().clone()))
{
info!("failed to schedule region updated event"; "err" =>?e);
}
}
RegionChangeEvent::Destroy => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionDestroyed(ctx.region().clone()))
{
info!("failed to schedule region destroyed event"; "err" =>?e);
}
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use engine_rocks::RocksSnapshot;
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_WRITE};
use kvproto::metapb::Region;
use kvproto::raft_cmdpb::*;
use std::time::Duration;
use tikv::storage::kv::TestEngineBuilder;
use tikv_util::worker::{dummy_scheduler, ReceiverWrapper};
fn put_cf(cf: &str, key: &[u8], value: &[u8]) -> Request {
let mut cmd = Request::default();
cmd.set_cmd_type(CmdType::Put);
cmd.mut_put().set_cf(cf.to_owned());
cmd.mut_put().set_key(key.to_vec());
cmd.mut_put().set_value(value.to_vec());
cmd
}
fn expect_recv(rx: &mut ReceiverWrapper<Task<RocksSnapshot>>, data: Vec<Request>) {
if data.is_empty() {
match rx.recv_timeout(Duration::from_millis(10)) {
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => return,
_ => panic!("unexpected result"),
};
}
match rx.recv_timeout(Duration::from_millis(10)).unwrap().unwrap() {
Task::ChangeLog { cmd_batch,.. } => {
assert_eq!(cmd_batch.len(), 1);
assert_eq!(cmd_batch[0].len(), 1);
assert_eq!(&cmd_batch[0].cmds[0].request.get_requests(), &data);
}
_ => panic!("unexpected task"),
};
}
#[test]
fn test_observing() {
let (scheduler, mut rx) = dummy_scheduler();
let observer = Observer::new(scheduler);
let engine = TestEngineBuilder::new().build().unwrap().get_rocksdb();
let mut data = vec![
put_cf(CF_LOCK, b"k1", b"v"),
put_cf(CF_DEFAULT, b"k2", b"v"),
put_cf(CF_LOCK, b"k3", b"v"),
put_cf(CF_LOCK, b"k4", b"v"),
put_cf(CF_DEFAULT, b"k6", b"v"),
put_cf(CF_WRITE, b"k7", b"v"),
put_cf(CF_WRITE, b"k8", b"v"),
];
let mut cmd = Cmd::new(0, RaftCmdRequest::default(), RaftCmdResponse::default());
cmd.request.mut_requests().clear();
for put in &data {
cmd.request.mut_requests().push(put.clone());
}
// Both cdc and resolved-ts worker are observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe all data
expect_recv(&mut rx, data.clone());
// Only cdc is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Still observe all data
expect_recv(&mut rx, data.clone());
// Only resolved-ts worker is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Only observe lock related data
data.retain(|p| p.get_put().cf!= CF_DEFAULT);
expect_recv(&mut rx, data);
// Both cdc and resolved-ts worker are not observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd);
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe no data
expect_recv(&mut rx, vec![]);
}
}
| {
info!("failed to schedule register region task"; "err" => ?e);
} | conditional_block |
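`on_flush_applied_cmd_batch` in the row above boils down to: take the applied batches, keep only the lock-related ones, account for their size, and push a task onto a worker through its scheduler. A stripped-down sketch of that shape using only `std`; `Batch`, `Task`, and `Scheduler` are stand-ins invented for the example, not TiKV's actual types.

```rust
use std::sync::mpsc::{channel, Sender};

// Hypothetical stand-ins for CmdBatch / Task / Scheduler from the row above.
#[derive(Debug)]
struct Batch {
    size: usize,
    lock_related: bool,
}

#[derive(Debug)]
enum Task {
    ChangeLog(Vec<Batch>),
}

struct Scheduler(Sender<Task>);

impl Scheduler {
    // Mirrors the "filter, sum sizes, schedule" flow of on_flush_applied_cmd_batch.
    fn on_flush(&self, batches: &mut Vec<Batch>) {
        let batches: Vec<_> = std::mem::take(batches)
            .into_iter()
            .filter(|b| b.lock_related) // plays the role of lock_only_filter
            .collect();
        if batches.is_empty() {
            return;
        }
        let pending_bytes: usize = batches.iter().map(|b| b.size).sum();
        println!("pending bytes: {}", pending_bytes); // stands in for the metric
        let _ = self.0.send(Task::ChangeLog(batches));
    }
}

fn main() {
    let (tx, rx) = channel();
    let sched = Scheduler(tx);
    let mut incoming = vec![
        Batch { size: 10, lock_related: true },
        Batch { size: 99, lock_related: false },
    ];
    sched.on_flush(&mut incoming);
    println!("{:?}", rx.recv().unwrap());
}
```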
observer.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use engine_traits::KvEngine;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::*;
use tikv_util::worker::Scheduler;
use crate::cmd::lock_only_filter;
use crate::endpoint::Task;
use crate::metrics::RTS_CHANNEL_PENDING_CMD_BYTES;
pub struct Observer<E: KvEngine> {
scheduler: Scheduler<Task<E::Snapshot>>,
}
impl<E: KvEngine> Observer<E> {
pub fn new(scheduler: Scheduler<Task<E::Snapshot>>) -> Self {
Observer { scheduler }
}
pub fn register_to(&self, coprocessor_host: &mut CoprocessorHost<E>) {
// The `resolved-ts` cmd observer will `mem::take` the `Vec<CmdBatch>`, use a low priority
// to let it be the last observer and avoid affecting other observers
coprocessor_host
.registry
.register_cmd_observer(1000, BoxCmdObserver::new(self.clone()));
coprocessor_host
.registry
.register_role_observer(100, BoxRoleObserver::new(self.clone()));
coprocessor_host
.registry
.register_region_change_observer(100, BoxRegionChangeObserver::new(self.clone()));
}
}
impl<E: KvEngine> Clone for Observer<E> {
fn clone(&self) -> Self {
Self {
scheduler: self.scheduler.clone(),
}
}
}
impl<E: KvEngine> Coprocessor for Observer<E> {}
impl<E: KvEngine> CmdObserver<E> for Observer<E> {
fn on_flush_applied_cmd_batch(
&self,
max_level: ObserveLevel,
cmd_batches: &mut Vec<CmdBatch>,
_: &E,
) {
if max_level == ObserveLevel::None {
return;
}
let cmd_batches: Vec<_> = std::mem::take(cmd_batches)
.into_iter()
.filter_map(lock_only_filter)
.collect();
if cmd_batches.is_empty() {
return;
}
let size = cmd_batches.iter().map(|b| b.size()).sum::<usize>();
RTS_CHANNEL_PENDING_CMD_BYTES.add(size as i64);
if let Err(e) = self.scheduler.schedule(Task::ChangeLog {
cmd_batch: cmd_batches,
snapshot: None,
}) {
info!("failed to schedule change log event"; "err" =>?e);
}
}
fn on_applied_current_term(&self, role: StateRole, region: &Region) {
// Start advancing resolved ts after the peer becomes leader and applies on its term
if role == StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::RegisterRegion {
region: region.clone(),
}) {
info!("failed to schedule register region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RoleObserver for Observer<E> {
fn on_role_change(&self, ctx: &mut ObserverContext<'_>, role: StateRole) {
// Stop advancing resolved ts after the peer steps down to follower or candidate.
// There is no need to check the observe id because all role change events are expected to be scheduled in order.
if role!= StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::DeRegisterRegion {
region_id: ctx.region().id,
}) {
info!("failed to schedule deregister region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RegionChangeObserver for Observer<E> {
fn on_region_changed(
&self,
ctx: &mut ObserverContext<'_>,
event: RegionChangeEvent,
role: StateRole,
) {
// If the peer is not the leader, it must not have registered the observe region or it is deregistering
// the observe region, so there is no need to send `RegionUpdated`/`RegionDestroyed` to update the observe
// region
if role!= StateRole::Leader {
return;
}
match event {
RegionChangeEvent::Create => {}
RegionChangeEvent::Update => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionUpdated(ctx.region().clone()))
{
info!("failed to schedule region updated event"; "err" =>?e);
}
}
RegionChangeEvent::Destroy => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionDestroyed(ctx.region().clone()))
{
info!("failed to schedule region destroyed event"; "err" =>?e);
}
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use engine_rocks::RocksSnapshot;
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_WRITE};
use kvproto::metapb::Region;
use kvproto::raft_cmdpb::*;
use std::time::Duration;
use tikv::storage::kv::TestEngineBuilder;
use tikv_util::worker::{dummy_scheduler, ReceiverWrapper};
fn put_cf(cf: &str, key: &[u8], value: &[u8]) -> Request {
let mut cmd = Request::default();
cmd.set_cmd_type(CmdType::Put);
cmd.mut_put().set_cf(cf.to_owned());
cmd.mut_put().set_key(key.to_vec());
cmd.mut_put().set_value(value.to_vec());
cmd
}
fn expect_recv(rx: &mut ReceiverWrapper<Task<RocksSnapshot>>, data: Vec<Request>) {
if data.is_empty() {
match rx.recv_timeout(Duration::from_millis(10)) {
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => return,
_ => panic!("unexpected result"),
};
}
match rx.recv_timeout(Duration::from_millis(10)).unwrap().unwrap() {
Task::ChangeLog { cmd_batch,.. } => {
assert_eq!(cmd_batch.len(), 1);
assert_eq!(cmd_batch[0].len(), 1);
assert_eq!(&cmd_batch[0].cmds[0].request.get_requests(), &data);
}
_ => panic!("unexpected task"),
};
}
#[test]
fn | () {
let (scheduler, mut rx) = dummy_scheduler();
let observer = Observer::new(scheduler);
let engine = TestEngineBuilder::new().build().unwrap().get_rocksdb();
let mut data = vec![
put_cf(CF_LOCK, b"k1", b"v"),
put_cf(CF_DEFAULT, b"k2", b"v"),
put_cf(CF_LOCK, b"k3", b"v"),
put_cf(CF_LOCK, b"k4", b"v"),
put_cf(CF_DEFAULT, b"k6", b"v"),
put_cf(CF_WRITE, b"k7", b"v"),
put_cf(CF_WRITE, b"k8", b"v"),
];
let mut cmd = Cmd::new(0, RaftCmdRequest::default(), RaftCmdResponse::default());
cmd.request.mut_requests().clear();
for put in &data {
cmd.request.mut_requests().push(put.clone());
}
// Both cdc and resolved-ts worker are observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe all data
expect_recv(&mut rx, data.clone());
// Only cdc is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Still observe all data
expect_recv(&mut rx, data.clone());
// Only resolved-ts worker is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Only observe lock related data
data.retain(|p| p.get_put().cf!= CF_DEFAULT);
expect_recv(&mut rx, data);
// Both cdc and resolved-ts worker are not observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd);
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe no data
expect_recv(&mut rx, vec![]);
}
}
| test_observing | identifier_name |
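The masked identifier in the row above is the test function, which drives the observer with a dummy scheduler and asserts on what reaches the receiver, using a receive timeout for the "nothing should arrive" case. A minimal stand-alone version of that test shape, with a plain `mpsc` channel standing in for `tikv_util`'s `dummy_scheduler`; the `Task` enum here is invented for the sketch.

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::time::Duration;

// Invented stand-in for the scheduled task type used by the real test.
#[derive(Debug)]
enum Task {
    ChangeLog(Vec<&'static str>),
}

fn expect_recv(rx: &Receiver<Task>, expected: Vec<&'static str>) {
    if expected.is_empty() {
        // Nothing should arrive: a timeout is the passing case.
        match rx.recv_timeout(Duration::from_millis(10)) {
            Err(RecvTimeoutError::Timeout) => return,
            other => panic!("unexpected result: {:?}", other),
        }
    }
    match rx.recv_timeout(Duration::from_millis(10)).unwrap() {
        Task::ChangeLog(got) => assert_eq!(got, expected),
    }
}

fn main() {
    let (tx, rx) = channel();
    tx.send(Task::ChangeLog(vec!["k1", "k3"])).unwrap();
    expect_recv(&rx, vec!["k1", "k3"]);
    expect_recv(&rx, vec![]); // nothing else pending
    println!("ok");
}
```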
observer.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use engine_traits::KvEngine;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::*;
use tikv_util::worker::Scheduler;
use crate::cmd::lock_only_filter;
use crate::endpoint::Task;
use crate::metrics::RTS_CHANNEL_PENDING_CMD_BYTES;
pub struct Observer<E: KvEngine> {
scheduler: Scheduler<Task<E::Snapshot>>,
}
impl<E: KvEngine> Observer<E> {
pub fn new(scheduler: Scheduler<Task<E::Snapshot>>) -> Self {
Observer { scheduler }
}
pub fn register_to(&self, coprocessor_host: &mut CoprocessorHost<E>) |
}
impl<E: KvEngine> Clone for Observer<E> {
fn clone(&self) -> Self {
Self {
scheduler: self.scheduler.clone(),
}
}
}
impl<E: KvEngine> Coprocessor for Observer<E> {}
impl<E: KvEngine> CmdObserver<E> for Observer<E> {
fn on_flush_applied_cmd_batch(
&self,
max_level: ObserveLevel,
cmd_batches: &mut Vec<CmdBatch>,
_: &E,
) {
if max_level == ObserveLevel::None {
return;
}
let cmd_batches: Vec<_> = std::mem::take(cmd_batches)
.into_iter()
.filter_map(lock_only_filter)
.collect();
if cmd_batches.is_empty() {
return;
}
let size = cmd_batches.iter().map(|b| b.size()).sum::<usize>();
RTS_CHANNEL_PENDING_CMD_BYTES.add(size as i64);
if let Err(e) = self.scheduler.schedule(Task::ChangeLog {
cmd_batch: cmd_batches,
snapshot: None,
}) {
info!("failed to schedule change log event"; "err" =>?e);
}
}
fn on_applied_current_term(&self, role: StateRole, region: &Region) {
// Start advancing resolved ts after the peer becomes leader and applies on its term
if role == StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::RegisterRegion {
region: region.clone(),
}) {
info!("failed to schedule register region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RoleObserver for Observer<E> {
fn on_role_change(&self, ctx: &mut ObserverContext<'_>, role: StateRole) {
// Stop advancing resolved ts after the peer steps down to follower or candidate.
// There is no need to check the observe id because all role change events are expected to be scheduled in order.
if role!= StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::DeRegisterRegion {
region_id: ctx.region().id,
}) {
info!("failed to schedule deregister region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RegionChangeObserver for Observer<E> {
fn on_region_changed(
&self,
ctx: &mut ObserverContext<'_>,
event: RegionChangeEvent,
role: StateRole,
) {
// If the peer is not the leader, it must not have registered the observe region or it is deregistering
// the observe region, so there is no need to send `RegionUpdated`/`RegionDestroyed` to update the observe
// region
if role!= StateRole::Leader {
return;
}
match event {
RegionChangeEvent::Create => {}
RegionChangeEvent::Update => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionUpdated(ctx.region().clone()))
{
info!("failed to schedule region updated event"; "err" =>?e);
}
}
RegionChangeEvent::Destroy => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionDestroyed(ctx.region().clone()))
{
info!("failed to schedule region destroyed event"; "err" =>?e);
}
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use engine_rocks::RocksSnapshot;
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_WRITE};
use kvproto::metapb::Region;
use kvproto::raft_cmdpb::*;
use std::time::Duration;
use tikv::storage::kv::TestEngineBuilder;
use tikv_util::worker::{dummy_scheduler, ReceiverWrapper};
fn put_cf(cf: &str, key: &[u8], value: &[u8]) -> Request {
let mut cmd = Request::default();
cmd.set_cmd_type(CmdType::Put);
cmd.mut_put().set_cf(cf.to_owned());
cmd.mut_put().set_key(key.to_vec());
cmd.mut_put().set_value(value.to_vec());
cmd
}
fn expect_recv(rx: &mut ReceiverWrapper<Task<RocksSnapshot>>, data: Vec<Request>) {
if data.is_empty() {
match rx.recv_timeout(Duration::from_millis(10)) {
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => return,
_ => panic!("unexpected result"),
};
}
match rx.recv_timeout(Duration::from_millis(10)).unwrap().unwrap() {
Task::ChangeLog { cmd_batch,.. } => {
assert_eq!(cmd_batch.len(), 1);
assert_eq!(cmd_batch[0].len(), 1);
assert_eq!(&cmd_batch[0].cmds[0].request.get_requests(), &data);
}
_ => panic!("unexpected task"),
};
}
#[test]
fn test_observing() {
let (scheduler, mut rx) = dummy_scheduler();
let observer = Observer::new(scheduler);
let engine = TestEngineBuilder::new().build().unwrap().get_rocksdb();
let mut data = vec![
put_cf(CF_LOCK, b"k1", b"v"),
put_cf(CF_DEFAULT, b"k2", b"v"),
put_cf(CF_LOCK, b"k3", b"v"),
put_cf(CF_LOCK, b"k4", b"v"),
put_cf(CF_DEFAULT, b"k6", b"v"),
put_cf(CF_WRITE, b"k7", b"v"),
put_cf(CF_WRITE, b"k8", b"v"),
];
let mut cmd = Cmd::new(0, RaftCmdRequest::default(), RaftCmdResponse::default());
cmd.request.mut_requests().clear();
for put in &data {
cmd.request.mut_requests().push(put.clone());
}
// Both cdc and resolved-ts worker are observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe all data
expect_recv(&mut rx, data.clone());
// Only cdc is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Still observe all data
expect_recv(&mut rx, data.clone());
// Only resolved-ts worker is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Only observe lock related data
data.retain(|p| p.get_put().cf!= CF_DEFAULT);
expect_recv(&mut rx, data);
// Both cdc and resolved-ts worker are not observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd);
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe no data
expect_recv(&mut rx, vec![]);
}
}
| {
// The `resolved-ts` cmd observer will `mem::take` the `Vec<CmdBatch>`, use a low priority
// to let it be the last observer and avoid affecting other observers
coprocessor_host
.registry
.register_cmd_observer(1000, BoxCmdObserver::new(self.clone()));
coprocessor_host
.registry
.register_role_observer(100, BoxRoleObserver::new(self.clone()));
coprocessor_host
.registry
.register_region_change_observer(100, BoxRegionChangeObserver::new(self.clone()));
} | identifier_body |
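Alongside the row above, the observer code leans on `filter_map(lock_only_filter)`: a filter that returns `Option<T>` can drop a batch entirely and reshape the ones it keeps in a single pass. A tiny self-contained illustration of that shape; `RawBatch`, `LockBatch`, and `lock_only` are invented names, not the real `resolved_ts` types.

```rust
// Illustration of the Option-returning filter used with filter_map above.
#[derive(Debug)]
struct RawBatch {
    lock_entries: Vec<&'static str>,
    other_entries: Vec<&'static str>,
}

#[derive(Debug)]
struct LockBatch {
    entries: Vec<&'static str>,
}

fn lock_only(raw: RawBatch) -> Option<LockBatch> {
    if raw.lock_entries.is_empty() {
        None // nothing lock-related: drop the whole batch
    } else {
        Some(LockBatch { entries: raw.lock_entries })
    }
}

fn main() {
    let raw = vec![
        RawBatch { lock_entries: vec!["k1"], other_entries: vec!["k2"] },
        RawBatch { lock_entries: vec![], other_entries: vec!["k6"] },
    ];
    let kept: Vec<LockBatch> = raw.into_iter().filter_map(lock_only).collect();
    println!("{:?}", kept);
}
```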
observer.rs | // Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0.
use engine_traits::KvEngine;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::*;
use tikv_util::worker::Scheduler;
use crate::cmd::lock_only_filter;
use crate::endpoint::Task;
use crate::metrics::RTS_CHANNEL_PENDING_CMD_BYTES;
pub struct Observer<E: KvEngine> {
scheduler: Scheduler<Task<E::Snapshot>>,
}
impl<E: KvEngine> Observer<E> {
pub fn new(scheduler: Scheduler<Task<E::Snapshot>>) -> Self {
Observer { scheduler }
}
pub fn register_to(&self, coprocessor_host: &mut CoprocessorHost<E>) {
// The `resolved-ts` cmd observer will `mem::take` the `Vec<CmdBatch>`, use a low priority
// to let it be the last observer and avoid affecting other observers
coprocessor_host
.registry
.register_cmd_observer(1000, BoxCmdObserver::new(self.clone()));
coprocessor_host
.registry
.register_role_observer(100, BoxRoleObserver::new(self.clone()));
coprocessor_host
.registry
.register_region_change_observer(100, BoxRegionChangeObserver::new(self.clone()));
}
}
impl<E: KvEngine> Clone for Observer<E> {
fn clone(&self) -> Self {
Self {
scheduler: self.scheduler.clone(),
}
}
}
impl<E: KvEngine> Coprocessor for Observer<E> {}
impl<E: KvEngine> CmdObserver<E> for Observer<E> {
fn on_flush_applied_cmd_batch(
&self,
max_level: ObserveLevel,
cmd_batches: &mut Vec<CmdBatch>,
_: &E,
) {
if max_level == ObserveLevel::None {
return;
}
let cmd_batches: Vec<_> = std::mem::take(cmd_batches)
.into_iter()
.filter_map(lock_only_filter)
.collect();
if cmd_batches.is_empty() {
return;
} | cmd_batch: cmd_batches,
snapshot: None,
}) {
info!("failed to schedule change log event"; "err" =>?e);
}
}
fn on_applied_current_term(&self, role: StateRole, region: &Region) {
// Start advancing resolved ts after the peer becomes leader and applies on its term
if role == StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::RegisterRegion {
region: region.clone(),
}) {
info!("failed to schedule register region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RoleObserver for Observer<E> {
fn on_role_change(&self, ctx: &mut ObserverContext<'_>, role: StateRole) {
// Stop advancing resolved ts after the peer steps down to follower or candidate.
// There is no need to check the observe id because all role change events are expected to be scheduled in order.
if role!= StateRole::Leader {
if let Err(e) = self.scheduler.schedule(Task::DeRegisterRegion {
region_id: ctx.region().id,
}) {
info!("failed to schedule deregister region task"; "err" =>?e);
}
}
}
}
impl<E: KvEngine> RegionChangeObserver for Observer<E> {
fn on_region_changed(
&self,
ctx: &mut ObserverContext<'_>,
event: RegionChangeEvent,
role: StateRole,
) {
// If the peer is not the leader, it must not have registered the observe region or it is deregistering
// the observe region, so there is no need to send `RegionUpdated`/`RegionDestroyed` to update the observe
// region
if role!= StateRole::Leader {
return;
}
match event {
RegionChangeEvent::Create => {}
RegionChangeEvent::Update => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionUpdated(ctx.region().clone()))
{
info!("failed to schedule region updated event"; "err" =>?e);
}
}
RegionChangeEvent::Destroy => {
if let Err(e) = self
.scheduler
.schedule(Task::RegionDestroyed(ctx.region().clone()))
{
info!("failed to schedule region destroyed event"; "err" =>?e);
}
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use engine_rocks::RocksSnapshot;
use engine_traits::{CF_DEFAULT, CF_LOCK, CF_WRITE};
use kvproto::metapb::Region;
use kvproto::raft_cmdpb::*;
use std::time::Duration;
use tikv::storage::kv::TestEngineBuilder;
use tikv_util::worker::{dummy_scheduler, ReceiverWrapper};
fn put_cf(cf: &str, key: &[u8], value: &[u8]) -> Request {
let mut cmd = Request::default();
cmd.set_cmd_type(CmdType::Put);
cmd.mut_put().set_cf(cf.to_owned());
cmd.mut_put().set_key(key.to_vec());
cmd.mut_put().set_value(value.to_vec());
cmd
}
fn expect_recv(rx: &mut ReceiverWrapper<Task<RocksSnapshot>>, data: Vec<Request>) {
if data.is_empty() {
match rx.recv_timeout(Duration::from_millis(10)) {
Err(std::sync::mpsc::RecvTimeoutError::Timeout) => return,
_ => panic!("unexpected result"),
};
}
match rx.recv_timeout(Duration::from_millis(10)).unwrap().unwrap() {
Task::ChangeLog { cmd_batch,.. } => {
assert_eq!(cmd_batch.len(), 1);
assert_eq!(cmd_batch[0].len(), 1);
assert_eq!(&cmd_batch[0].cmds[0].request.get_requests(), &data);
}
_ => panic!("unexpected task"),
};
}
#[test]
fn test_observing() {
let (scheduler, mut rx) = dummy_scheduler();
let observer = Observer::new(scheduler);
let engine = TestEngineBuilder::new().build().unwrap().get_rocksdb();
let mut data = vec![
put_cf(CF_LOCK, b"k1", b"v"),
put_cf(CF_DEFAULT, b"k2", b"v"),
put_cf(CF_LOCK, b"k3", b"v"),
put_cf(CF_LOCK, b"k4", b"v"),
put_cf(CF_DEFAULT, b"k6", b"v"),
put_cf(CF_WRITE, b"k7", b"v"),
put_cf(CF_WRITE, b"k8", b"v"),
];
let mut cmd = Cmd::new(0, RaftCmdRequest::default(), RaftCmdResponse::default());
cmd.request.mut_requests().clear();
for put in &data {
cmd.request.mut_requests().push(put.clone());
}
// Both cdc and resolved-ts worker are observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe all data
expect_recv(&mut rx, data.clone());
// Only cdc is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Still observe all data
expect_recv(&mut rx, data.clone());
// Only resolved-ts worker is observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd.clone());
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Only observe lock related data
data.retain(|p| p.get_put().cf!= CF_DEFAULT);
expect_recv(&mut rx, data);
// Both cdc and resolved-ts worker are not observing
let observe_info = CmdObserveInfo::from_handle(ObserveHandle::new(), ObserveHandle::new());
observe_info.rts_id.stop_observing();
observe_info.cdc_id.stop_observing();
let mut cb = CmdBatch::new(&observe_info, Region::default());
cb.push(&observe_info, 0, cmd);
observer.on_flush_applied_cmd_batch(cb.level, &mut vec![cb], &engine);
// Observe no data
expect_recv(&mut rx, vec![]);
}
} | let size = cmd_batches.iter().map(|b| b.size()).sum::<usize>();
RTS_CHANNEL_PENDING_CMD_BYTES.add(size as i64);
if let Err(e) = self.scheduler.schedule(Task::ChangeLog { | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods +'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods +'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
webrender_traits::RendererKind::OSMesa
} else {
webrender_traits::RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
clear_framebuffer: true,
clear_color: webrender_traits::ColorF::new(1.0, 1.0, 1.0, 1.0),
render_target_debug: false,
workers: None,
}).expect("Unable to initialize webrender!")
};
// It is important that this call is done in a single-threaded fashion; we
// can't defer it until after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn | () {
panic!("Sandboxing is not supported on Windows.");
}
| create_sandbox | identifier_name |
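`BothLogger` in the row above fans each record out to two downstream loggers and treats the pair as enabled when either side is, with its filter taken as the `max` of the two. The same shape reduced to a self-contained sketch, using a made-up `Sink` trait rather than the older `log` crate `Log` trait the row is written against.

```rust
// Invented Sink trait standing in for log::Log; the point is the fan-out shape.
trait Sink {
    fn enabled(&self, level: u8) -> bool;
    fn write(&self, msg: &str);
}

struct StdoutSink;
impl Sink for StdoutSink {
    fn enabled(&self, level: u8) -> bool { level <= 3 }
    fn write(&self, msg: &str) { println!("stdout: {}", msg); }
}

struct StderrSink;
impl Sink for StderrSink {
    fn enabled(&self, level: u8) -> bool { level <= 1 }
    fn write(&self, msg: &str) { eprintln!("stderr: {}", msg); }
}

// Mirrors BothLogger: every record goes to both sinks, and the pair is
// "enabled" if either side wants it.
struct Both<A, B>(A, B);

impl<A: Sink, B: Sink> Sink for Both<A, B> {
    fn enabled(&self, level: u8) -> bool {
        self.0.enabled(level) || self.1.enabled(level)
    }
    fn write(&self, msg: &str) {
        self.0.write(msg);
        self.1.write(msg);
    }
}

fn main() {
    let both = Both(StdoutSink, StderrSink);
    if both.enabled(2) {
        both.write("hello");
    }
}
```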
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, as well as maintaining the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods +'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods +'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
webrender_traits::RendererKind::OSMesa
} else {
webrender_traits::RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
clear_framebuffer: true,
clear_color: webrender_traits::ColorF::new(1.0, 1.0, 1.0, 1.0),
render_target_debug: false,
workers: None,
}).expect("Unable to initialize webrender!")
};
// It is important that this call is done in a single-threaded fashion; we
// can't defer it until after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 |
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
}
| {
self.compositor.pinch_zoom_level()
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
| //! Servo, the mighty web browser engine from the future.
//!
//! This is a very simple library that wires all of Servo's components
//! together as type `Browser`, along with a generic client
//! implementing the `WindowMethods` trait, to create a working web
//! browser.
//!
//! The `Browser` type is responsible for configuring a
//! `Constellation`, which does the heavy lifting of coordinating all
//! of Servo's internal subsystems, including the `ScriptThread` and the
//! `LayoutThread`, and also maintains the navigation context.
//!
//! The `Browser` is fed events from a generic type that implements the
//! `WindowMethods` trait.
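//!
//! A rough sketch of the embedding loop this implies (illustrative only:
//! `MyWindow` and `collect_events` stand in for the embedder's own
//! `WindowMethods` implementation and event source, which are not part of
//! this crate):
//!
//! ```ignore
//! let window = Rc::new(MyWindow::new());
//! let mut browser = Browser::new(window.clone());
//! loop {
//!     let events: Vec<WindowEvent> = collect_events(&window);
//!     if !browser.handle_events(events) {
//!         break; // assuming a `false` return means the engine is shutting down
//!     }
//! }
//! ```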
extern crate env_logger;
#[cfg(not(target_os = "windows"))]
extern crate gaol;
extern crate gleam;
extern crate log;
pub extern crate bluetooth;
pub extern crate bluetooth_traits;
pub extern crate canvas;
pub extern crate canvas_traits;
pub extern crate compositing;
pub extern crate constellation;
pub extern crate debugger;
pub extern crate devtools;
pub extern crate devtools_traits;
pub extern crate euclid;
pub extern crate gfx;
pub extern crate ipc_channel;
pub extern crate layout_thread;
pub extern crate msg;
pub extern crate net;
pub extern crate net_traits;
pub extern crate profile;
pub extern crate profile_traits;
pub extern crate script;
pub extern crate script_traits;
pub extern crate script_layout_interface;
pub extern crate servo_config;
pub extern crate servo_geometry;
pub extern crate servo_url;
pub extern crate style;
pub extern crate style_traits;
pub extern crate webrender_traits;
pub extern crate webvr;
pub extern crate webvr_traits;
#[cfg(feature = "webdriver")]
extern crate webdriver_server;
extern crate webrender;
#[cfg(feature = "webdriver")]
fn webdriver(port: u16, constellation: Sender<ConstellationMsg>) {
webdriver_server::start_server(port, constellation);
}
#[cfg(not(feature = "webdriver"))]
fn webdriver(_port: u16, _constellation: Sender<ConstellationMsg>) { }
use bluetooth::BluetoothThreadFactory;
use bluetooth_traits::BluetoothRequest;
use compositing::{CompositorProxy, IOCompositor};
use compositing::compositor_thread::InitialCompositorState;
use compositing::windowing::WindowEvent;
use compositing::windowing::WindowMethods;
use constellation::{Constellation, InitialConstellationState, UnprivilegedPipelineContent};
use constellation::{FromCompositorLogger, FromScriptLogger};
#[cfg(not(target_os = "windows"))]
use constellation::content_process_sandbox_profile;
use env_logger::Logger as EnvLogger;
#[cfg(not(target_os = "windows"))]
use gaol::sandbox::{ChildSandbox, ChildSandboxMethods};
use gfx::font_cache_thread::FontCacheThread;
use ipc_channel::ipc::{self, IpcSender};
use log::{Log, LogMetadata, LogRecord};
use net::image_cache_thread::new_image_cache_thread;
use net::resource_thread::new_resource_threads;
use net_traits::IpcSend;
use profile::mem as profile_mem;
use profile::time as profile_time;
use profile_traits::mem;
use profile_traits::time;
use script_traits::{ConstellationMsg, SWManagerSenders, ScriptMsg};
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_config::resource_files::resources_dir_path;
use servo_url::ServoUrl;
use std::borrow::Cow;
use std::cmp::max;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use webvr::{WebVRThread, WebVRCompositorHandler};
pub use gleam::gl;
pub use servo_config as config;
pub use servo_url as url;
/// The in-process interface to Servo.
///
/// It does everything necessary to render the web, primarily
/// orchestrating the interaction between JavaScript, CSS layout,
/// rendering, and the client window.
///
/// Clients create a `Browser` for a given reference-counted type
/// implementing `WindowMethods`, which is the bridge to whatever
/// application Servo is embedded in. Clients then create an event
/// loop to pump messages between the embedding application and
/// various browser components.
pub struct Browser<Window: WindowMethods + 'static> {
compositor: IOCompositor<Window>,
constellation_chan: Sender<ConstellationMsg>,
}
impl<Window> Browser<Window> where Window: WindowMethods + 'static {
pub fn new(window: Rc<Window>) -> Browser<Window> {
// Global configuration options, parsed from the command line.
let opts = opts::get();
// Get both endpoints of a special channel for communication between
// the client window and the compositor. This channel is unique because
// messages to client may need to pump a platform-specific event loop
// to deliver the message.
let (compositor_proxy, compositor_receiver) =
window.create_compositor_channel();
let supports_clipboard = window.supports_clipboard();
let time_profiler_chan = profile_time::Profiler::create(&opts.time_profiling,
opts.time_profiler_trace_path.clone());
let mem_profiler_chan = profile_mem::Profiler::create(opts.mem_profiler_period);
let debugger_chan = opts.debugger_port.map(|port| {
debugger::start_server(port)
});
let devtools_chan = opts.devtools_port.map(|port| {
devtools::start_server(port)
});
let mut resource_path = resources_dir_path().unwrap();
resource_path.push("shaders");
let (webrender, webrender_api_sender) = {
// TODO(gw): Duplicates device_pixels_per_screen_px from compositor. Tidy up!
let scale_factor = window.scale_factor().get();
let device_pixel_ratio = match opts.device_pixels_per_px {
Some(device_pixels_per_px) => device_pixels_per_px,
None => match opts.output_file {
Some(_) => 1.0,
None => scale_factor,
}
};
let renderer_kind = if opts::get().should_use_osmesa() {
webrender_traits::RendererKind::OSMesa
} else {
webrender_traits::RendererKind::Native
};
let recorder = if opts.webrender_record {
let record_path = PathBuf::from("wr-record.bin");
let recorder = Box::new(webrender::BinaryRecorder::new(&record_path));
Some(recorder as Box<webrender::ApiRecordingReceiver>)
} else {
None
};
webrender::Renderer::new(webrender::RendererOptions {
device_pixel_ratio: device_pixel_ratio,
resource_override_path: Some(resource_path),
enable_aa: opts.enable_text_antialiasing,
enable_profiler: opts.webrender_stats,
debug: opts.webrender_debug,
recorder: recorder,
precache_shaders: opts.precache_shaders,
enable_scrollbars: opts.output_file.is_none(),
renderer_kind: renderer_kind,
enable_subpixel_aa: opts.enable_subpixel_text_antialiasing,
clear_framebuffer: true,
clear_color: webrender_traits::ColorF::new(1.0, 1.0, 1.0, 1.0),
render_target_debug: false,
workers: None,
}).expect("Unable to initialize webrender!")
};
// It is important that this call is made in a single-threaded fashion; it
// cannot be deferred until after `create_constellation` has started.
script::init();
// Create the constellation, which maintains the engine
// pipelines, including the script and layout threads, as well
// as the navigation context.
let (constellation_chan, sw_senders) = create_constellation(opts.user_agent.clone(),
opts.config_dir.clone(),
opts.url.clone(),
compositor_proxy.clone_compositor_proxy(),
time_profiler_chan.clone(),
mem_profiler_chan.clone(),
debugger_chan,
devtools_chan,
supports_clipboard,
&webrender,
webrender_api_sender.clone());
// Send the constellation's swmanager sender to service worker manager thread
script::init_service_workers(sw_senders);
if cfg!(feature = "webdriver") {
if let Some(port) = opts.webdriver_port {
webdriver(port, constellation_chan.clone());
}
}
// The compositor coordinates with the client window to create the final
// rendered page and display it somewhere.
let compositor = IOCompositor::create(window, InitialCompositorState {
sender: compositor_proxy,
receiver: compositor_receiver,
constellation_chan: constellation_chan.clone(),
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
webrender: webrender,
webrender_api_sender: webrender_api_sender,
});
Browser {
compositor: compositor,
constellation_chan: constellation_chan,
}
}
pub fn handle_events(&mut self, events: Vec<WindowEvent>) -> bool {
self.compositor.handle_events(events)
}
pub fn repaint_synchronously(&mut self) {
self.compositor.repaint_synchronously()
}
pub fn pinch_zoom_level(&self) -> f32 {
self.compositor.pinch_zoom_level()
}
pub fn request_title_for_main_frame(&self) {
self.compositor.title_for_main_frame()
}
pub fn setup_logging(&self) {
let constellation_chan = self.constellation_chan.clone();
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromCompositorLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
}
fn create_constellation(user_agent: Cow<'static, str>,
config_dir: Option<PathBuf>,
url: Option<ServoUrl>,
compositor_proxy: Box<CompositorProxy + Send>,
time_profiler_chan: time::ProfilerChan,
mem_profiler_chan: mem::ProfilerChan,
debugger_chan: Option<debugger::Sender>,
devtools_chan: Option<Sender<devtools_traits::DevtoolsControlMsg>>,
supports_clipboard: bool,
webrender: &webrender::Renderer,
webrender_api_sender: webrender_traits::RenderApiSender)
-> (Sender<ConstellationMsg>, SWManagerSenders) {
let bluetooth_thread: IpcSender<BluetoothRequest> = BluetoothThreadFactory::new();
let (public_resource_threads, private_resource_threads) =
new_resource_threads(user_agent,
devtools_chan.clone(),
time_profiler_chan.clone(),
config_dir);
let image_cache_thread = new_image_cache_thread(public_resource_threads.sender(),
webrender_api_sender.create_api());
let font_cache_thread = FontCacheThread::new(public_resource_threads.sender(),
Some(webrender_api_sender.create_api()));
let resource_sender = public_resource_threads.sender();
let initial_state = InitialConstellationState {
compositor_proxy: compositor_proxy,
debugger_chan: debugger_chan,
devtools_chan: devtools_chan,
bluetooth_thread: bluetooth_thread,
image_cache_thread: image_cache_thread,
font_cache_thread: font_cache_thread,
public_resource_threads: public_resource_threads,
private_resource_threads: private_resource_threads,
time_profiler_chan: time_profiler_chan,
mem_profiler_chan: mem_profiler_chan,
supports_clipboard: supports_clipboard,
webrender_api_sender: webrender_api_sender,
};
let (constellation_chan, from_swmanager_sender) =
Constellation::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>::start(initial_state);
if PREFS.is_webvr_enabled() {
// WebVR initialization
let (mut handler, sender) = WebVRCompositorHandler::new();
let webvr_thread = WebVRThread::spawn(constellation_chan.clone(), sender);
handler.set_webvr_thread_sender(webvr_thread.clone());
webrender.set_vr_compositor_handler(handler);
constellation_chan.send(ConstellationMsg::SetWebVRThread(webvr_thread)).unwrap();
}
if let Some(url) = url {
constellation_chan.send(ConstellationMsg::InitLoadUrl(url)).unwrap();
};
// channels to communicate with Service Worker Manager
let sw_senders = SWManagerSenders {
swmanager_sender: from_swmanager_sender,
resource_sender: resource_sender
};
(constellation_chan, sw_senders)
}
// A logger that logs to two downstream loggers.
// This should probably be in the log crate.
struct BothLogger<Log1, Log2>(Log1, Log2);
impl<Log1, Log2> Log for BothLogger<Log1, Log2> where Log1: Log, Log2: Log {
fn enabled(&self, metadata: &LogMetadata) -> bool {
self.0.enabled(metadata) || self.1.enabled(metadata)
}
fn log(&self, record: &LogRecord) {
self.0.log(record);
self.1.log(record);
}
}
pub fn set_logger(constellation_chan: IpcSender<ScriptMsg>) {
log::set_logger(|max_log_level| {
let env_logger = EnvLogger::new();
let con_logger = FromScriptLogger::new(constellation_chan);
let filter = max(env_logger.filter(), con_logger.filter());
let logger = BothLogger(env_logger, con_logger);
max_log_level.set(filter);
Box::new(logger)
}).expect("Failed to set logger.")
}
/// Content process entry point.
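///
/// The `token` argument names the parent's IPC bootstrap channel; everything
/// else the new pipeline needs arrives over it as `UnprivilegedPipelineContent`.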
pub fn run_content_process(token: String) {
let (unprivileged_content_sender, unprivileged_content_receiver) =
ipc::channel::<UnprivilegedPipelineContent>().unwrap();
let connection_bootstrap: IpcSender<IpcSender<UnprivilegedPipelineContent>> =
IpcSender::connect(token).unwrap();
connection_bootstrap.send(unprivileged_content_sender).unwrap();
let unprivileged_content = unprivileged_content_receiver.recv().unwrap();
opts::set_defaults(unprivileged_content.opts());
PREFS.extend(unprivileged_content.prefs());
set_logger(unprivileged_content.constellation_chan());
// Enter the sandbox if necessary.
if opts::get().sandbox {
create_sandbox();
}
// send the required channels to the service worker manager
let sw_senders = unprivileged_content.swmanager_senders();
script::init();
script::init_service_workers(sw_senders);
unprivileged_content.start_all::<script_layout_interface::message::Msg,
layout_thread::LayoutThread,
script::script_thread::ScriptThread>(true);
}
// This is a workaround for https://github.com/rust-lang/rust/pull/30175 until
// https://github.com/lfairy/rust-errno/pull/5 lands, and should be removed once
// we update Servo with the rust-errno crate.
#[cfg(target_os = "android")]
#[no_mangle]
pub unsafe extern fn __errno_location() -> *mut i32 {
extern { fn __errno() -> *mut i32; }
__errno()
}
#[cfg(not(target_os = "windows"))]
fn create_sandbox() {
ChildSandbox::new(content_process_sandbox_profile()).activate()
.expect("Failed to activate sandbox!");
}
#[cfg(target_os = "windows")]
fn create_sandbox() {
panic!("Sandboxing is not supported on Windows.");
} | random_line_split |
|
rt-spawn-rate.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
extern crate green;
extern crate rustuv;
use std::task::spawn;
use std::os;
use std::uint;
// Very simple spawn rate test. Spawn N tasks that do nothing and
// return.
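// The task count comes from the first command-line argument; with no
// argument the test defaults to spawning 100000 tasks.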
#[start]
fn start(argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
let args = os::args();
let args = args.as_slice();
let n = if args.len() == 2 {
from_str::<uint>(args[1]).unwrap()
} else {
100000 | };
for _ in range(0, n) {
spawn(proc() {});
}
} | random_line_split |
|
rt-spawn-rate.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
extern crate green;
extern crate rustuv;
use std::task::spawn;
use std::os;
use std::uint;
// Very simple spawn rate test. Spawn N tasks that do nothing and
// return.
#[start]
fn start(argc: int, argv: **u8) -> int |
fn main() {
let args = os::args();
let args = args.as_slice();
let n = if args.len() == 2 {
from_str::<uint>(args[1]).unwrap()
} else {
100000
};
for _ in range(0, n) {
spawn(proc() {});
}
}
| {
green::start(argc, argv, rustuv::event_loop, main)
} | identifier_body |
rt-spawn-rate.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
extern crate green;
extern crate rustuv;
use std::task::spawn;
use std::os;
use std::uint;
// Very simple spawn rate test. Spawn N tasks that do nothing and
// return.
#[start]
fn | (argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
let args = os::args();
let args = args.as_slice();
let n = if args.len() == 2 {
from_str::<uint>(args[1]).unwrap()
} else {
100000
};
for _ in range(0, n) {
spawn(proc() {});
}
}
| start | identifier_name |
rt-spawn-rate.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_start]
extern crate green;
extern crate rustuv;
use std::task::spawn;
use std::os;
use std::uint;
// Very simple spawn rate test. Spawn N tasks that do nothing and
// return.
#[start]
fn start(argc: int, argv: **u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
let args = os::args();
let args = args.as_slice();
let n = if args.len() == 2 | else {
100000
};
for _ in range(0, n) {
spawn(proc() {});
}
}
| {
from_str::<uint>(args[1]).unwrap()
} | conditional_block |
base64.rs | /// Entry point for encoding input strings into base64-encoded output strings.
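///
/// For example, `encode("Man")` produces `"TWFu"` (see the tests at the
/// bottom of this file).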
pub fn encode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let mut result = String::new();
let mut it = inp.encode_utf16().map(|e| e as u32);
loop {
let triplet = vec![it.next(), it.next(), it.next()];
match triplet[0] {
None => break,
Some(_) => {}
}
let loop_end = triplet.iter().any(|e| match *e {
None => true,
_ => false,
}); // If any element was None, this is the final (possibly short) triplet, so break out of the loop after emitting it.
let mut bit_string = 0u32;
for (i, item) in triplet.iter().map(|e| e.unwrap_or(0u32)).enumerate() {
// unwrap_or(some_value) unwraps the Option/Result and returns the value of Some(_) or the default some_value.
bit_string |= item;
if i != 2 {
bit_string <<= 8;
}
}
let sextet3 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet2 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet1 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet0 = (bit_string & 0x3F) as usize;
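// The 24-bit group now splits into four 6-bit indices: sextet0 covers bits
// 23..18 of the packed triplet, sextet1 bits 17..12, sextet2 bits 11..6 and
// sextet3 bits 5..0.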
let lsb1 = match triplet[1] {
None => '=',
_ => base64_index[sextet2],
};
let lsb0 = match triplet[2] {
None => '=',
_ => base64_index[sextet3],
};
result = format!("{}{}{}{}{}",
result,
base64_index[sextet0],
base64_index[sextet1],
lsb1,
lsb0);
if loop_end {
break;
}
}
result
}
/// Entry point for reversing base64-encoded input strings back to their original, decoded form.
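/// For example, `decode("TWFu")` produces `"Man"`; inputs with their trailing
/// `=` padding stripped are accepted as well (see the tests below).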
pub fn decode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let inp = match inp.len() % 4 {
2 => format!("{}==", inp),
3 => format!("{}=", inp),
_ => inp.to_owned(),
};
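// Callers may pass input with its trailing '=' padding stripped; it is
// restored here from the length mod 4 so the main loop always sees whole
// quartets.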
let mut it = inp.as_str().chars().map(|e| e as u8);
let mut result = String::new();
loop {
let mut quartet_: Vec<Option<u8>> = vec![];
for _ in 0..4 {
quartet_.push(it.next());
}
if quartet_.iter().any(|e| match *e {
None => true,
_ => false,
}) {
break;
}
let quartet = quartet_.iter().map(|e| (*e).unwrap_or(0u8)).collect::<Vec<u8>>();
let mut bit_string = 0u32;
for (i, item) in quartet.iter().enumerate() {
bit_string |= base64_index.iter()
.position(|&x| x == (*item as char))
.unwrap_or(0usize) as u32;
if i != 3 {
bit_string <<= 6;
}
}
let octet2 = match quartet[3] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet1 = match quartet[2] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet0 = (bit_string & 0xFF) as u8;
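// The four sextets have been packed back into a 24-bit group: octet0 is its
// high byte, while octet1 and octet2 were zeroed above wherever the matching
// input character was '=' padding.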
let (octet0, octet1, octet2) = (octet0 as char, octet1 as char, octet2 as char);
result = match (octet1, octet2) {
('\0', '\0') => format!("{}{}", result, octet0),
(_, '\0') => format!("{}{}{}", result, octet0, octet1),
_ => format!("{}{}{}{}", result, octet0, octet1, octet2),
}
}
result
}
#[cfg(test)] | animals, which is a lust of the mind, that by a perseverance of delight in the continued \
and indefatigable generation of knowledge, exceeds the short vehemence of any carnal \
pleasure."
}
fn leviathan_b64() -> &'static str {
"TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="
}
#[test]
fn encode_man() {
assert_eq!("TWFu".to_owned(), encode("Man"));
}
#[test]
fn encode_leviathan() {
assert_eq!(leviathan_b64(), encode(leviathan()));
}
#[test]
fn decode_man() {
assert_eq!("Man".to_owned(), decode("TWFu"));
}
#[test]
fn decode_leviathan() {
assert_eq!(leviathan(), decode(leviathan_b64()));
}
} | mod tests {
use super::{encode, decode};
fn leviathan() -> &'static str {
"Man is distinguished, not only by his reason, but by this singular passion from other \ | random_line_split |
base64.rs | /// Entry point for encoding input strings into base64-encoded output strings.
pub fn encode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let mut result = String::new();
let mut it = inp.encode_utf16().map(|e| e as u32);
loop {
let triplet = vec![it.next(), it.next(), it.next()];
match triplet[0] {
None => break,
Some(_) => {}
}
let loop_end = triplet.iter().any(|e| match *e {
None => true,
_ => false,
}); // If any element was None, this is the final (possibly short) triplet, so break out of the loop after emitting it.
let mut bit_string = 0u32;
for (i, item) in triplet.iter().map(|e| e.unwrap_or(0u32)).enumerate() {
// unwrap_or(some_value) unwraps the Option/Result and returns the value of Some(_) or the default some_value.
bit_string |= item;
if i != 2 {
bit_string <<= 8;
}
}
let sextet3 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet2 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet1 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet0 = (bit_string & 0x3F) as usize;
let lsb1 = match triplet[1] {
None => '=',
_ => base64_index[sextet2],
};
let lsb0 = match triplet[2] {
None => '=',
_ => base64_index[sextet3],
};
result = format!("{}{}{}{}{}",
result,
base64_index[sextet0],
base64_index[sextet1],
lsb1,
lsb0);
if loop_end {
break;
}
}
result
}
/// Entry point for reversing base64-encoded input strings back to their original, decoded form.
pub fn decode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let inp = match inp.len() % 4 {
2 => format!("{}==", inp),
3 => format!("{}=", inp),
_ => inp.to_owned(),
};
let mut it = inp.as_str().chars().map(|e| e as u8);
let mut result = String::new();
loop {
let mut quartet_: Vec<Option<u8>> = vec![];
for _ in 0..4 {
quartet_.push(it.next());
}
if quartet_.iter().any(|e| match *e {
None => true,
_ => false,
}) {
break;
}
let quartet = quartet_.iter().map(|e| (*e).unwrap_or(0u8)).collect::<Vec<u8>>();
let mut bit_string = 0u32;
for (i, item) in quartet.iter().enumerate() {
bit_string |= base64_index.iter()
.position(|&x| x == (*item as char))
.unwrap_or(0usize) as u32;
if i!= 3 |
}
let octet2 = match quartet[3] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet1 = match quartet[2] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet0 = (bit_string & 0xFF) as u8;
let (octet0, octet1, octet2) = (octet0 as char, octet1 as char, octet2 as char);
result = match (octet1, octet2) {
('\0', '\0') => format!("{}{}", result, octet0),
(_, '\0') => format!("{}{}{}", result, octet0, octet1),
_ => format!("{}{}{}{}", result, octet0, octet1, octet2),
}
}
result
}
#[cfg(test)]
mod tests {
use super::{encode, decode};
fn leviathan() -> &'static str {
"Man is distinguished, not only by his reason, but by this singular passion from other \
animals, which is a lust of the mind, that by a perseverance of delight in the continued \
and indefatigable generation of knowledge, exceeds the short vehemence of any carnal \
pleasure."
}
fn leviathan_b64() -> &'static str {
"TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="
}
#[test]
fn encode_man() {
assert_eq!("TWFu".to_owned(), encode("Man"));
}
#[test]
fn encode_leviathan() {
assert_eq!(leviathan_b64(), encode(leviathan()));
}
#[test]
fn decode_man() {
assert_eq!("Man".to_owned(), decode("TWFu"));
}
#[test]
fn decode_leviathan() {
assert_eq!(leviathan(), decode(leviathan_b64()));
}
}
| {
bit_string <<= 6;
} | conditional_block |
base64.rs | /// Entry point for encoding input strings into base64-encoded output strings.
pub fn encode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let mut result = String::new();
let mut it = inp.encode_utf16().map(|e| e as u32);
loop {
let triplet = vec![it.next(), it.next(), it.next()];
match triplet[0] {
None => break,
Some(_) => {}
}
let loop_end = triplet.iter().any(|e| match *e {
None => true,
_ => false,
}); // If any element was None, this is the final (possibly short) triplet, so break out of the loop after emitting it.
let mut bit_string = 0u32;
for (i, item) in triplet.iter().map(|e| e.unwrap_or(0u32)).enumerate() {
// unwrap_or(some_value) unwraps the Option/Result and returns the value of Some(_) or the default some_value.
bit_string |= item;
if i != 2 {
bit_string <<= 8;
}
}
let sextet3 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet2 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet1 = (bit_string & 0x3F) as usize;
bit_string >>= 6;
let sextet0 = (bit_string & 0x3F) as usize;
let lsb1 = match triplet[1] {
None => '=',
_ => base64_index[sextet2],
};
let lsb0 = match triplet[2] {
None => '=',
_ => base64_index[sextet3],
};
result = format!("{}{}{}{}{}",
result,
base64_index[sextet0],
base64_index[sextet1],
lsb1,
lsb0);
if loop_end {
break;
}
}
result
}
/// Entry point for reversing base64-encoded input strings back to their original, decoded form.
pub fn decode(inp: &str) -> String {
let base64_index = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
.chars()
.collect::<Vec<char>>();
let inp = match inp.len() % 4 {
2 => format!("{}==", inp),
3 => format!("{}=", inp),
_ => inp.to_owned(),
};
let mut it = inp.as_str().chars().map(|e| e as u8);
let mut result = String::new();
loop {
let mut quartet_: Vec<Option<u8>> = vec![];
for _ in 0..4 {
quartet_.push(it.next());
}
if quartet_.iter().any(|e| match *e {
None => true,
_ => false,
}) {
break;
}
let quartet = quartet_.iter().map(|e| (*e).unwrap_or(0u8)).collect::<Vec<u8>>();
let mut bit_string = 0u32;
for (i, item) in quartet.iter().enumerate() {
bit_string |= base64_index.iter()
.position(|&x| x == (*item as char))
.unwrap_or(0usize) as u32;
if i != 3 {
bit_string <<= 6;
}
}
let octet2 = match quartet[3] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet1 = match quartet[2] {
0x3D => 0x0,
_ => (bit_string & 0xFF) as u8,
};
bit_string >>= 8;
let octet0 = (bit_string & 0xFF) as u8;
let (octet0, octet1, octet2) = (octet0 as char, octet1 as char, octet2 as char);
result = match (octet1, octet2) {
('\0', '\0') => format!("{}{}", result, octet0),
(_, '\0') => format!("{}{}{}", result, octet0, octet1),
_ => format!("{}{}{}{}", result, octet0, octet1, octet2),
}
}
result
}
#[cfg(test)]
mod tests {
use super::{encode, decode};
fn leviathan() -> &'static str {
"Man is distinguished, not only by his reason, but by this singular passion from other \
animals, which is a lust of the mind, that by a perseverance of delight in the continued \
and indefatigable generation of knowledge, exceeds the short vehemence of any carnal \
pleasure."
}
fn leviathan_b64() -> &'static str {
"TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="
}
#[test]
fn encode_man() {
assert_eq!("TWFu".to_owned(), encode("Man"));
}
#[test]
fn | () {
assert_eq!(leviathan_b64(), encode(leviathan()));
}
#[test]
fn decode_man() {
assert_eq!("Man".to_owned(), decode("TWFu"));
}
#[test]
fn decode_leviathan() {
assert_eq!(leviathan(), decode(leviathan_b64()));
}
}
| encode_leviathan | identifier_name |