file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
md-butane.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors — BSD license
//! Testing molecular dynamics of butane
use lumol::input::Input;
use std::path::Path;
use std::sync::Once;
static START: Once = Once::new();
#[test]
fn bo
|
{
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("md-butane")
.join("nve.toml");
let system = Input::new(path).unwrap().read_system().unwrap();
assert_eq!(system.molecules().count(), 50);
for molecule in system.molecules() {
assert_eq!(molecule.bonds().len(), 3);
assert_eq!(molecule.angles().len(), 2);
assert_eq!(molecule.dihedrals().len(), 1);
}
}
#[test]
fn constant_energy() {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("md-butane")
.join("nve.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
let e_initial = config.system.total_energy();
config.simulation.run(&mut config.system, config.nsteps);
let e_final = config.system.total_energy();
assert!(f64::abs((e_initial - e_final) / e_final) < 1e-3);
}
|
nds_detection()
|
identifier_name
|
md-butane.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) Lumol's contributors — BSD license
//! Testing molecular dynamics of butane
use lumol::input::Input;
use std::path::Path;
use std::sync::Once;
static START: Once = Once::new();
#[test]
fn bonds_detection() {
|
#[test]
fn constant_energy() {
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("md-butane")
.join("nve.toml");
let mut config = Input::new(path).unwrap().read().unwrap();
let e_initial = config.system.total_energy();
config.simulation.run(&mut config.system, config.nsteps);
let e_final = config.system.total_energy();
assert!(f64::abs((e_initial - e_final) / e_final) < 1e-3);
}
|
START.call_once(::env_logger::init);
let path = Path::new(file!()).parent()
.unwrap()
.join("data")
.join("md-butane")
.join("nve.toml");
let system = Input::new(path).unwrap().read_system().unwrap();
assert_eq!(system.molecules().count(), 50);
for molecule in system.molecules() {
assert_eq!(molecule.bonds().len(), 3);
assert_eq!(molecule.angles().len(), 2);
assert_eq!(molecule.dihedrals().len(), 1);
}
}
|
identifier_body
|
x86_64.rs
|
use std::mem;
use crate::runtime::Imp;
extern {
fn objc_msgSend();
fn objc_msgSend_stret();
fn objc_msgSendSuper();
fn objc_msgSendSuper_stret();
}
pub fn msg_send_fn<R>() -> Imp {
// If the size of an object is larger than two eightbytes, it has class MEMORY.
// If the type has class MEMORY, then the caller provides space for the return
// value and passes the address of this storage.
// <http://people.freebsd.org/~obrien/amd64-elf-abi.pdf>
if mem::size_of::<R>() <= 16 {
objc_msgSend
} else {
objc_msgSend_stret
}
}
pub fn
|
<R>() -> Imp {
if mem::size_of::<R>() <= 16 {
objc_msgSendSuper
} else {
objc_msgSendSuper_stret
}
}
|
msg_send_super_fn
|
identifier_name
|
x86_64.rs
|
use std::mem;
use crate::runtime::Imp;
extern {
fn objc_msgSend();
fn objc_msgSend_stret();
fn objc_msgSendSuper();
fn objc_msgSendSuper_stret();
}
pub fn msg_send_fn<R>() -> Imp {
// If the size of an object is larger than two eightbytes, it has class MEMORY.
// If the type has class MEMORY, then the caller provides space for the return
|
} else {
objc_msgSend_stret
}
}
pub fn msg_send_super_fn<R>() -> Imp {
if mem::size_of::<R>() <= 16 {
objc_msgSendSuper
} else {
objc_msgSendSuper_stret
}
}
|
// value and passes the address of this storage.
// <http://people.freebsd.org/~obrien/amd64-elf-abi.pdf>
if mem::size_of::<R>() <= 16 {
objc_msgSend
|
random_line_split
|
x86_64.rs
|
use std::mem;
use crate::runtime::Imp;
extern {
fn objc_msgSend();
fn objc_msgSend_stret();
fn objc_msgSendSuper();
fn objc_msgSendSuper_stret();
}
pub fn msg_send_fn<R>() -> Imp {
// If the size of an object is larger than two eightbytes, it has class MEMORY.
// If the type has class MEMORY, then the caller provides space for the return
// value and passes the address of this storage.
// <http://people.freebsd.org/~obrien/amd64-elf-abi.pdf>
if mem::size_of::<R>() <= 16
|
else {
objc_msgSend_stret
}
}
pub fn msg_send_super_fn<R>() -> Imp {
if mem::size_of::<R>() <= 16 {
objc_msgSendSuper
} else {
objc_msgSendSuper_stret
}
}
|
{
objc_msgSend
}
|
conditional_block
|
x86_64.rs
|
use std::mem;
use crate::runtime::Imp;
extern {
fn objc_msgSend();
fn objc_msgSend_stret();
fn objc_msgSendSuper();
fn objc_msgSendSuper_stret();
}
pub fn msg_send_fn<R>() -> Imp {
// If the size of an object is larger than two eightbytes, it has class MEMORY.
// If the type has class MEMORY, then the caller provides space for the return
// value and passes the address of this storage.
// <http://people.freebsd.org/~obrien/amd64-elf-abi.pdf>
if mem::size_of::<R>() <= 16 {
objc_msgSend
} else {
objc_msgSend_stret
}
}
pub fn msg_send_super_fn<R>() -> Imp
|
{
if mem::size_of::<R>() <= 16 {
objc_msgSendSuper
} else {
objc_msgSendSuper_stret
}
}
|
identifier_body
|
|
map.rs
|
use core::ptr;
use super::area::PhysMemoryRange;
use super::constants::*;
use super::page_align;
use super::prelude::*;
/// Maximum number of ok-to-use entries
pub const MAX_OK_ENTRIES: usize = 20;
#[rustfmt::skip]
fn read_item(index: usize) -> (u64, u64, u32, u32) {
let base = (BOOT_TMP_MMAP_BUFFER + 2u64).as_u64() as *mut u8;
let e_start: u64 = unsafe { ptr::read_unaligned(base.add(24*index ) as *mut u64) };
let e_size: u64 = unsafe { ptr::read_unaligned(base.add(24*index + 8) as *mut u64) };
let e_type: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 16) as *mut u32) };
let e_acpi_data: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 20) as *mut u32) };
(e_start, e_size, e_type, e_acpi_data)
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryRanges([Option<PhysMemoryRange>; MAX_OK_ENTRIES]);
impl MemoryRanges {
const fn new() -> Self {
Self([None; MAX_OK_ENTRIES])
}
fn write_entry(&mut self, entry: PhysMemoryRange) {
let mut first_free = None;
for i in 0..MAX_OK_ENTRIES {
if let Some(ok) = self.0[i] {
if ok.can_merge(entry) {
self.0[i] = Some(ok.merge(entry));
return;
}
} else if first_free.is_none() {
first_free = Some(i);
}
}
self.0[first_free.expect("No free entries left")] = Some(entry);
}
fn split_and_write_entry(&mut self, entry: PhysMemoryRange) {
// These are permanently reserved for the kernel
if let Some(ok) = entry.above(MEMORY_RESERVED_BELOW) {
// These are permanently reserved for the heap
if let Some(below) = ok.below(PhysAddr::new(HEAP_START)) {
self.write_entry(below);
}
if let Some(above) = ok.above(PhysAddr::new(HEAP_START + HEAP_SIZE)) {
self.write_entry(above);
}
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryInfo {
/// Memory that can be allocated using some allocation method
pub allocatable: [Option<PhysMemoryRange>; MAX_OK_ENTRIES],
/// All physical memory that exists
pub max_memory: u64,
}
pub(crate) fn load_memory_map() -> MemoryInfo {
// load memory map from where our bootloader left it
// http://wiki.osdev.org/Detecting_Memory_(x86)#BIOS_Function:_INT_0x15.2C_EAX_.3D_0xE820
let mut allocatable = MemoryRanges::new();
let mut max_memory = 0u64;
{
let entry_count: u8 =
unsafe { ptr::read_volatile(BOOT_TMP_MMAP_BUFFER.as_u64() as *mut u8) };
for index in 0..(entry_count as usize) {
let (e_start, e_size, e_type, e_acpi_data) = read_item(index);
log::trace!(
"Section {:>3}: {:>16x}-{:>16x}: type: {:#x}, acpi: {:#x}",
index,
e_start,
e_start + e_size,
e_type,
e_acpi_data
);
// Mappable area
max_memory = max_memory.max(e_start + e_size);
// Frame data, accept only full frames
let start = page_align(PhysAddr::new(e_start), true);
let end = page_align(PhysAddr::new(e_start + e_size), false);
if start == end {
continue;
}
// acpi_data bit 0 must be set
if (e_acpi_data & 1) != 1 {
continue;
}
// Types 1, 4 ok to use
let alloc_ok = e_type == 1 || e_type == 4;
if alloc_ok {
allocatable.split_and_write_entry(PhysMemoryRange::range(start..end));
}
}
}
// TODO: Check that required memory regions exist
// Calculate and display memory size
let mut memory_counter_bytes: u64 = 0;
for entry in &allocatable.0 {
if let Some(area) = entry {
memory_counter_bytes += area.size_bytes() as u64;
log::debug!("Area : {:>16x}-{:>16x}", area.start(), area.end());
}
}
if memory_counter_bytes < 1024 * 1024 * 1024 {
log::info!("Memory size {} MiB", memory_counter_bytes / (1024 * 1024));
} else {
let full_gibs = memory_counter_bytes / (1024 * 1024 * 1024);
let cent_gibs = (memory_counter_bytes % (1024 * 1024 * 1024)) / 1024_00_000;
log::info!("Memory size {}.{:02} GiB", full_gibs, cent_gibs);
}
|
}
|
MemoryInfo {
allocatable: allocatable.0,
max_memory,
}
|
random_line_split
|
map.rs
|
use core::ptr;
use super::area::PhysMemoryRange;
use super::constants::*;
use super::page_align;
use super::prelude::*;
/// Maximum number of ok-to-use entries
pub const MAX_OK_ENTRIES: usize = 20;
#[rustfmt::skip]
fn read_item(index: usize) -> (u64, u64, u32, u32) {
let base = (BOOT_TMP_MMAP_BUFFER + 2u64).as_u64() as *mut u8;
let e_start: u64 = unsafe { ptr::read_unaligned(base.add(24*index ) as *mut u64) };
let e_size: u64 = unsafe { ptr::read_unaligned(base.add(24*index + 8) as *mut u64) };
let e_type: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 16) as *mut u32) };
let e_acpi_data: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 20) as *mut u32) };
(e_start, e_size, e_type, e_acpi_data)
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryRanges([Option<PhysMemoryRange>; MAX_OK_ENTRIES]);
impl MemoryRanges {
const fn new() -> Self {
Self([None; MAX_OK_ENTRIES])
}
fn write_entry(&mut self, entry: PhysMemoryRange) {
let mut first_free = None;
for i in 0..MAX_OK_ENTRIES {
if let Some(ok) = self.0[i] {
if ok.can_merge(entry) {
self.0[i] = Some(ok.merge(entry));
return;
}
} else if first_free.is_none() {
first_free = Some(i);
}
}
self.0[first_free.expect("No free entries left")] = Some(entry);
}
fn split_and_write_entry(&mut self, entry: PhysMemoryRange) {
// These are permanently reserved for the kernel
if let Some(ok) = entry.above(MEMORY_RESERVED_BELOW) {
// These are permanently reserved for the heap
if let Some(below) = ok.below(PhysAddr::new(HEAP_START)) {
self.write_entry(below);
}
if let Some(above) = ok.above(PhysAddr::new(HEAP_START + HEAP_SIZE)) {
self.write_entry(above);
}
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryInfo {
/// Memory that can be allocated using some allocation method
pub allocatable: [Option<PhysMemoryRange>; MAX_OK_ENTRIES],
/// All physical memory that exists
pub max_memory: u64,
}
pub(crate) fn
|
() -> MemoryInfo {
// load memory map from where our bootloader left it
// http://wiki.osdev.org/Detecting_Memory_(x86)#BIOS_Function:_INT_0x15.2C_EAX_.3D_0xE820
let mut allocatable = MemoryRanges::new();
let mut max_memory = 0u64;
{
let entry_count: u8 =
unsafe { ptr::read_volatile(BOOT_TMP_MMAP_BUFFER.as_u64() as *mut u8) };
for index in 0..(entry_count as usize) {
let (e_start, e_size, e_type, e_acpi_data) = read_item(index);
log::trace!(
"Section {:>3}: {:>16x}-{:>16x}: type: {:#x}, acpi: {:#x}",
index,
e_start,
e_start + e_size,
e_type,
e_acpi_data
);
// Mappable area
max_memory = max_memory.max(e_start + e_size);
// Frame data, accept only full frames
let start = page_align(PhysAddr::new(e_start), true);
let end = page_align(PhysAddr::new(e_start + e_size), false);
if start == end {
continue;
}
// acpi_data bit 0 must be set
if (e_acpi_data & 1) != 1 {
continue;
}
// Types 1, 4 ok to use
let alloc_ok = e_type == 1 || e_type == 4;
if alloc_ok {
allocatable.split_and_write_entry(PhysMemoryRange::range(start..end));
}
}
}
// TODO: Check that required memory regions exist
// Calculate and display memory size
let mut memory_counter_bytes: u64 = 0;
for entry in &allocatable.0 {
if let Some(area) = entry {
memory_counter_bytes += area.size_bytes() as u64;
log::debug!("Area : {:>16x}-{:>16x}", area.start(), area.end());
}
}
if memory_counter_bytes < 1024 * 1024 * 1024 {
log::info!("Memory size {} MiB", memory_counter_bytes / (1024 * 1024));
} else {
let full_gibs = memory_counter_bytes / (1024 * 1024 * 1024);
let cent_gibs = (memory_counter_bytes % (1024 * 1024 * 1024)) / 1024_00_000;
log::info!("Memory size {}.{:02} GiB", full_gibs, cent_gibs);
}
MemoryInfo {
allocatable: allocatable.0,
max_memory,
}
}
|
load_memory_map
|
identifier_name
|
map.rs
|
use core::ptr;
use super::area::PhysMemoryRange;
use super::constants::*;
use super::page_align;
use super::prelude::*;
/// Maximum number of ok-to-use entries
pub const MAX_OK_ENTRIES: usize = 20;
#[rustfmt::skip]
fn read_item(index: usize) -> (u64, u64, u32, u32) {
let base = (BOOT_TMP_MMAP_BUFFER + 2u64).as_u64() as *mut u8;
let e_start: u64 = unsafe { ptr::read_unaligned(base.add(24*index ) as *mut u64) };
let e_size: u64 = unsafe { ptr::read_unaligned(base.add(24*index + 8) as *mut u64) };
let e_type: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 16) as *mut u32) };
let e_acpi_data: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 20) as *mut u32) };
(e_start, e_size, e_type, e_acpi_data)
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryRanges([Option<PhysMemoryRange>; MAX_OK_ENTRIES]);
impl MemoryRanges {
const fn new() -> Self {
Self([None; MAX_OK_ENTRIES])
}
fn write_entry(&mut self, entry: PhysMemoryRange) {
let mut first_free = None;
for i in 0..MAX_OK_ENTRIES {
if let Some(ok) = self.0[i] {
if ok.can_merge(entry)
|
} else if first_free.is_none() {
first_free = Some(i);
}
}
self.0[first_free.expect("No free entries left")] = Some(entry);
}
fn split_and_write_entry(&mut self, entry: PhysMemoryRange) {
// These are permanently reserved for the kernel
if let Some(ok) = entry.above(MEMORY_RESERVED_BELOW) {
// These are permanently reserved for the heap
if let Some(below) = ok.below(PhysAddr::new(HEAP_START)) {
self.write_entry(below);
}
if let Some(above) = ok.above(PhysAddr::new(HEAP_START + HEAP_SIZE)) {
self.write_entry(above);
}
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryInfo {
/// Memory that can be allocated using some allocation method
pub allocatable: [Option<PhysMemoryRange>; MAX_OK_ENTRIES],
/// All physical memory that exists
pub max_memory: u64,
}
pub(crate) fn load_memory_map() -> MemoryInfo {
// load memory map from where our bootloader left it
// http://wiki.osdev.org/Detecting_Memory_(x86)#BIOS_Function:_INT_0x15.2C_EAX_.3D_0xE820
let mut allocatable = MemoryRanges::new();
let mut max_memory = 0u64;
{
let entry_count: u8 =
unsafe { ptr::read_volatile(BOOT_TMP_MMAP_BUFFER.as_u64() as *mut u8) };
for index in 0..(entry_count as usize) {
let (e_start, e_size, e_type, e_acpi_data) = read_item(index);
log::trace!(
"Section {:>3}: {:>16x}-{:>16x}: type: {:#x}, acpi: {:#x}",
index,
e_start,
e_start + e_size,
e_type,
e_acpi_data
);
// Mappable area
max_memory = max_memory.max(e_start + e_size);
// Frame data, accept only full frames
let start = page_align(PhysAddr::new(e_start), true);
let end = page_align(PhysAddr::new(e_start + e_size), false);
if start == end {
continue;
}
// acpi_data bit 0 must be set
if (e_acpi_data & 1) != 1 {
continue;
}
// Types 1, 4 ok to use
let alloc_ok = e_type == 1 || e_type == 4;
if alloc_ok {
allocatable.split_and_write_entry(PhysMemoryRange::range(start..end));
}
}
}
// TODO: Check that required memory regions exist
// Calculate and display memory size
let mut memory_counter_bytes: u64 = 0;
for entry in &allocatable.0 {
if let Some(area) = entry {
memory_counter_bytes += area.size_bytes() as u64;
log::debug!("Area : {:>16x}-{:>16x}", area.start(), area.end());
}
}
if memory_counter_bytes < 1024 * 1024 * 1024 {
log::info!("Memory size {} MiB", memory_counter_bytes / (1024 * 1024));
} else {
let full_gibs = memory_counter_bytes / (1024 * 1024 * 1024);
let cent_gibs = (memory_counter_bytes % (1024 * 1024 * 1024)) / 1024_00_000;
log::info!("Memory size {}.{:02} GiB", full_gibs, cent_gibs);
}
MemoryInfo {
allocatable: allocatable.0,
max_memory,
}
}
|
{
self.0[i] = Some(ok.merge(entry));
return;
}
|
conditional_block
|
map.rs
|
use core::ptr;
use super::area::PhysMemoryRange;
use super::constants::*;
use super::page_align;
use super::prelude::*;
/// Maximum number of ok-to-use entries
pub const MAX_OK_ENTRIES: usize = 20;
#[rustfmt::skip]
fn read_item(index: usize) -> (u64, u64, u32, u32) {
let base = (BOOT_TMP_MMAP_BUFFER + 2u64).as_u64() as *mut u8;
let e_start: u64 = unsafe { ptr::read_unaligned(base.add(24*index ) as *mut u64) };
let e_size: u64 = unsafe { ptr::read_unaligned(base.add(24*index + 8) as *mut u64) };
let e_type: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 16) as *mut u32) };
let e_acpi_data: u32 = unsafe { ptr::read_unaligned(base.add(24*index + 20) as *mut u32) };
(e_start, e_size, e_type, e_acpi_data)
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryRanges([Option<PhysMemoryRange>; MAX_OK_ENTRIES]);
impl MemoryRanges {
const fn new() -> Self {
Self([None; MAX_OK_ENTRIES])
}
fn write_entry(&mut self, entry: PhysMemoryRange)
|
fn split_and_write_entry(&mut self, entry: PhysMemoryRange) {
// These are permanently reserved for the kernel
if let Some(ok) = entry.above(MEMORY_RESERVED_BELOW) {
// These are permanently reserved for the heap
if let Some(below) = ok.below(PhysAddr::new(HEAP_START)) {
self.write_entry(below);
}
if let Some(above) = ok.above(PhysAddr::new(HEAP_START + HEAP_SIZE)) {
self.write_entry(above);
}
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct MemoryInfo {
/// Memory that can be allocated using some allocation method
pub allocatable: [Option<PhysMemoryRange>; MAX_OK_ENTRIES],
/// All physical memory that exists
pub max_memory: u64,
}
pub(crate) fn load_memory_map() -> MemoryInfo {
// load memory map from where our bootloader left it
// http://wiki.osdev.org/Detecting_Memory_(x86)#BIOS_Function:_INT_0x15.2C_EAX_.3D_0xE820
let mut allocatable = MemoryRanges::new();
let mut max_memory = 0u64;
{
let entry_count: u8 =
unsafe { ptr::read_volatile(BOOT_TMP_MMAP_BUFFER.as_u64() as *mut u8) };
for index in 0..(entry_count as usize) {
let (e_start, e_size, e_type, e_acpi_data) = read_item(index);
log::trace!(
"Section {:>3}: {:>16x}-{:>16x}: type: {:#x}, acpi: {:#x}",
index,
e_start,
e_start + e_size,
e_type,
e_acpi_data
);
// Mappable area
max_memory = max_memory.max(e_start + e_size);
// Frame data, accept only full frames
let start = page_align(PhysAddr::new(e_start), true);
let end = page_align(PhysAddr::new(e_start + e_size), false);
if start == end {
continue;
}
// acpi_data bit 0 must be set
if (e_acpi_data & 1) != 1 {
continue;
}
// Types 1, 4 ok to use
let alloc_ok = e_type == 1 || e_type == 4;
if alloc_ok {
allocatable.split_and_write_entry(PhysMemoryRange::range(start..end));
}
}
}
// TODO: Check that required memory regions exist
// Calculate and display memory size
let mut memory_counter_bytes: u64 = 0;
for entry in &allocatable.0 {
if let Some(area) = entry {
memory_counter_bytes += area.size_bytes() as u64;
log::debug!("Area : {:>16x}-{:>16x}", area.start(), area.end());
}
}
if memory_counter_bytes < 1024 * 1024 * 1024 {
log::info!("Memory size {} MiB", memory_counter_bytes / (1024 * 1024));
} else {
let full_gibs = memory_counter_bytes / (1024 * 1024 * 1024);
let cent_gibs = (memory_counter_bytes % (1024 * 1024 * 1024)) / 1024_00_000;
log::info!("Memory size {}.{:02} GiB", full_gibs, cent_gibs);
}
MemoryInfo {
allocatable: allocatable.0,
max_memory,
}
}
|
{
let mut first_free = None;
for i in 0..MAX_OK_ENTRIES {
if let Some(ok) = self.0[i] {
if ok.can_merge(entry) {
self.0[i] = Some(ok.merge(entry));
return;
}
} else if first_free.is_none() {
first_free = Some(i);
}
}
self.0[first_free.expect("No free entries left")] = Some(entry);
}
|
identifier_body
|
encoding.rs
|
// Copyright (c) 2015, Sam Payson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// TODO: Remove this when things stabilize.
#![allow(dead_code)]
use encoding::Quantifier::*;
#[derive(Copy,Eq,PartialEq)]
#[allow(missing_docs)]
/// A `Type` indicates the type of a record field.
pub enum Type {
Int8,
Int16,
Int32,
Int64,
UInt8,
UInt16,
UInt32,
UInt64,
Fixed32,
Fixed64,
Float32,
Float64,
Bytes,
String,
Bool,
Enum,
// A `Record` is any type which is encoded as >= FirstUnused (as specified in encoding.rex).
// The `index` field gives an index into the `depends` field of the CompleteEncoding which
// provides encoding information for this type.
Record{index: usize},
}
/// A `FieldID` represents the id of a record field.
#[derive(Eq,PartialEq,Ord,PartialOrd,Copy)]
pub struct FieldID(pub u64);
impl Type {
/// `from_u64` converts a `u64` to a `Type`. It uses the mapping established in `encoding.rex`.
pub fn from_u64(x: u64) -> Type {
let first_unused = 16;
match x {
0 => Type::Int8,
1 => Type::Int16,
2 => Type::Int32,
3 => Type::Int64,
4 => Type::UInt8,
5 => Type::UInt16,
6 => Type::UInt32,
7 => Type::UInt64,
8 => Type::Fixed32,
9 => Type::Fixed64,
10 => Type::Float32,
11 => Type::Float64,
12 => Type::Bytes,
13 => Type::String,
14 => Type::Bool,
15 => Type::Enum,
xx => Type::Record{index: (xx - first_unused) as usize},
}
}
}
/// The Quantifier type gives the multiplicity of a field. A Required field has exactly 1 element, an
/// Optional field has 0 or 1 elements, and a Repeated field has 0 or more elements.
#[derive(Debug,Copy,PartialEq,Eq)]
#[allow(missing_docs)]
pub enum Quantifier {
Required = 0,
Optional = 1,
Repeated = 2,
}
/// A `FieldEncoding` describes the encoding of a single field of a record. The `Type` field may be
/// a reference to a `RecordEncoding` which can be resolved by consulting the `depends` field of
/// the containing `CompleteEncoding`.
#[derive(PartialEq,Eq)]
pub struct FieldEncoding {
/// Integer id of this field within its containing record.
pub id: FieldID,
/// Name of this field in the .rex file, not used in the encoding.
pub name: String,
/// Is this field Required, Optional (opt), or Repeated (rep)?
pub quant: Quantifier,
/// Type of this field.
pub typ: Type,
/// The bounds field is the product of all bounds in an array field. So for example, the field
///
/// 1 matrix : [3][3]float32
///
/// would have a bounds field of 3*3 = 9.
///
/// The bounds field is not present for non-array types.
pub bounds: Option<usize>,
}
/// A `RecordEncoding` describes the encoding of a particular record type. It may contain
/// references to other `RecordEncoding`s which can be resolved by consulting the `depends` field
/// of the containing `CompleteEncoding`.
#[derive(PartialEq,Eq)]
pub struct RecordEncoding {
/// Name of the record type in the .rex file, not used in the encoding.
pub name: String,
/// Required fields of this record type, sorted by id.
pub req_fields: Vec<FieldEncoding>,
/// Optional and repeated fields of this record type, sorted by id.
pub opt_rep_fields: Vec<FieldEncoding>,
}
impl RecordEncoding {
fn sort_fields(&mut self) {
self.req_fields.sort_by(|x, y| x.id.cmp(&y.id));
self.opt_rep_fields.sort_by(|x, y| x.id.cmp(&y.id));
}
}
/// A `CompleteEncoding` provides all of the information necessary to encode or decode a particular
/// record type (and every record type that it can contain).
#[derive(PartialEq,Eq)]
pub struct CompleteEncoding {
/// The record type that this CompleteEncoding describes.
pub target: RecordEncoding,
/// Encodings for all dependencies of target. If a field has a type (t >= Type::FirstUnused),
/// then a RecordEncoding for that type is at depends[t - Type::FirstUnused].
pub depends: Vec<RecordEncoding>,
}
impl CompleteEncoding {
/// The `req_fields` and `opt_rep_fields` fields of a `RecordEncoding` are expected to be
/// sorted. This method performs that sort on `target` and each member of `depends`.
pub fn
|
(&mut self) {
self.target.sort_fields();
for dep in self.depends.iter_mut() {
dep.sort_fields();
}
}
}
pub use encoding::doc_workaround::COMPLETE_ENC;
mod doc_workaround {
#![allow(missing_docs)]
use encoding::*;
use encoding::Quantifier::*;
// These are indices into COMPLETE_ENC.depends, below. See docs for that field on the
// CompleteEncoding type.
const FIELD_ENCODING_TYP: Type = Type::Record{index: 0};
const RECORD_ENCODING_TYP: Type = Type::Record{index: 1};
lazy_static! {
// I apologize in advance for the confusing-ness of this comment.
//
// Encodings for records are themselves encoded, so we need to solve the chicken/egg problem in
// order to be able to interpret the encodings of encodings :3.
//
// We do this by providing a pre-decoded encoding for encodings. That's what this lovely
// structure is.
pub static ref COMPLETE_ENC: CompleteEncoding = CompleteEncoding {
target: RecordEncoding {
name: "CompleteEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "target".to_string(),
quant: Required,
typ: RECORD_ENCODING_TYP,
bounds: None
},
],
opt_rep_fields: vec![
FieldEncoding {
id: FieldID(2),
quant: Repeated,
name: "depends".to_string(),
typ: RECORD_ENCODING_TYP,
bounds: None
},
],
},
depends: vec![
RecordEncoding {
name: "FieldEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "id".to_string(),
quant: Required,
typ: Type::UInt64,
bounds: None
},
FieldEncoding {
id: FieldID(2),
name: "name".to_string(),
quant: Required,
typ: Type::String,
bounds: None
},
FieldEncoding {
id: FieldID(3),
name: "quant".to_string(),
quant: Required,
typ: Type::Enum,
bounds: None
},
FieldEncoding {
id: FieldID(4),
name: "typ".to_string(),
quant: Required,
typ: Type::Enum,
bounds: None
},
FieldEncoding {
id: FieldID(5),
name: "bounds".to_string(),
quant: Required,
typ: Type::UInt64,
bounds: None
},
],
opt_rep_fields: vec![]
},
RecordEncoding {
name: "RecordEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "name".to_string(),
quant: Required,
typ: Type::String,
bounds: None
},
],
opt_rep_fields: vec![
FieldEncoding {
id: FieldID(2),
name: "req_fields".to_string(),
quant: Repeated,
typ: FIELD_ENCODING_TYP,
bounds: None
},
FieldEncoding {
id: FieldID(3),
name: "opt_rep_fields".to_string(),
quant: Repeated,
typ: FIELD_ENCODING_TYP,
bounds: None
},
],
},
],
};
}
}
|
sort_fields
|
identifier_name
|
encoding.rs
|
// Copyright (c) 2015, Sam Payson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// TODO: Remove this when things stabilize.
#![allow(dead_code)]
use encoding::Quantifier::*;
#[derive(Copy,Eq,PartialEq)]
#[allow(missing_docs)]
/// A `Type` indicates the type of a record field.
pub enum Type {
Int8,
Int16,
Int32,
Int64,
UInt8,
UInt16,
UInt32,
UInt64,
Fixed32,
Fixed64,
Float32,
Float64,
Bytes,
String,
Bool,
Enum,
// A `Record` is any type which is encoded as >= FirstUnused (as specified in encoding.rex).
// The `index` field gives an index into the `depends` field of the CompleteEncoding which
// provides encoding information for this type.
Record{index: usize},
}
/// A `FieldID` represents the id of a record field.
#[derive(Eq,PartialEq,Ord,PartialOrd,Copy)]
pub struct FieldID(pub u64);
impl Type {
/// `from_u64` converts a `u64` to a `Type`. It uses the mapping established in `encoding.rex`.
pub fn from_u64(x: u64) -> Type {
let first_unused = 16;
match x {
0 => Type::Int8,
1 => Type::Int16,
2 => Type::Int32,
3 => Type::Int64,
4 => Type::UInt8,
5 => Type::UInt16,
6 => Type::UInt32,
7 => Type::UInt64,
8 => Type::Fixed32,
9 => Type::Fixed64,
10 => Type::Float32,
11 => Type::Float64,
12 => Type::Bytes,
13 => Type::String,
14 => Type::Bool,
15 => Type::Enum,
xx => Type::Record{index: (xx - first_unused) as usize},
}
}
}
/// The Quantifier type gives the multiplicity of a field. A Required field has exactly 1 element, an
/// Optional field has 0 or 1 elements, and a Repeated field has 0 or more elements.
#[derive(Debug,Copy,PartialEq,Eq)]
#[allow(missing_docs)]
pub enum Quantifier {
Required = 0,
Optional = 1,
Repeated = 2,
}
/// A `FieldEncoding` describes the encoding of a single field of a record. The `Type` field may be
/// a reference to a `RecordEncoding` which can be resolved by consulting the `depends` field of
/// the containing `CompleteEncoding`.
#[derive(PartialEq,Eq)]
pub struct FieldEncoding {
/// Integer id of this field within its containing record.
pub id: FieldID,
/// Name of this field in the .rex file, not used in the encoding.
pub name: String,
/// Is this field Required, Optional (opt), or Repeated (rep)?
pub quant: Quantifier,
/// Type of this field.
pub typ: Type,
/// The bounds field is the product of all bounds in an array field. So for example, the field
///
/// 1 matrix : [3][3]float32
///
/// would have a bounds field of 3*3 = 9.
///
/// The bounds field is not present for non-array types.
pub bounds: Option<usize>,
}
/// A `RecordEncoding` describes the encoding of a particular record type. It may contain
/// references to other `RecordEncoding`s which can be resolved by consulting the `depends` field
/// of the containing `CompleteEncoding`.
#[derive(PartialEq,Eq)]
pub struct RecordEncoding {
/// Name of the record type in the .rex file, not used in the encoding.
pub name: String,
/// Required fields of this record type, sorted by id.
pub req_fields: Vec<FieldEncoding>,
/// Optional and repeated fields of this record type, sorted by id.
pub opt_rep_fields: Vec<FieldEncoding>,
}
impl RecordEncoding {
fn sort_fields(&mut self) {
self.req_fields.sort_by(|x, y| x.id.cmp(&y.id));
self.opt_rep_fields.sort_by(|x, y| x.id.cmp(&y.id));
}
}
/// A `CompleteEncoding` provides all of the information necessary to encode or decode a particular
/// record type (and every record type that it can contain).
#[derive(PartialEq,Eq)]
pub struct CompleteEncoding {
/// The record type that this CompleteEncoding describes.
pub target: RecordEncoding,
/// Encodings for all dependencies of target. If a field has a type (t >= Type::FirstUnused),
/// then a RecordEncoding for that type is at depends[t - Type::FirstUnused].
pub depends: Vec<RecordEncoding>,
}
impl CompleteEncoding {
/// The `req_fields` and `opt_rep_fields` fields of a `RecordEncoding` are expected to be
/// sorted. This method performs that sort on `target` and each member of `depends`.
pub fn sort_fields(&mut self) {
self.target.sort_fields();
for dep in self.depends.iter_mut() {
dep.sort_fields();
}
}
}
pub use encoding::doc_workaround::COMPLETE_ENC;
mod doc_workaround {
#![allow(missing_docs)]
use encoding::*;
use encoding::Quantifier::*;
// These are indices into COMPLETE_ENC.depends, below. See docs for that field on the
// CompleteEncoding type.
const FIELD_ENCODING_TYP: Type = Type::Record{index: 0};
const RECORD_ENCODING_TYP: Type = Type::Record{index: 1};
lazy_static! {
// I apologize in advance for the confusing-ness of this comment.
//
// Encodings for records are themselves encoded, so we need to solve the chicken/egg problem in
// order to be able to interpret the encodings of encodings :3.
//
// We do this by providing a pre-decoded encoding for encodings. That's what this lovely
// structure is.
pub static ref COMPLETE_ENC: CompleteEncoding = CompleteEncoding {
target: RecordEncoding {
name: "CompleteEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "target".to_string(),
quant: Required,
typ: RECORD_ENCODING_TYP,
bounds: None
},
],
opt_rep_fields: vec![
FieldEncoding {
id: FieldID(2),
quant: Repeated,
name: "depends".to_string(),
typ: RECORD_ENCODING_TYP,
bounds: None
},
],
},
depends: vec![
RecordEncoding {
name: "FieldEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "id".to_string(),
quant: Required,
typ: Type::UInt64,
bounds: None
},
FieldEncoding {
id: FieldID(2),
name: "name".to_string(),
quant: Required,
typ: Type::String,
bounds: None
},
FieldEncoding {
id: FieldID(3),
name: "quant".to_string(),
quant: Required,
typ: Type::Enum,
bounds: None
},
FieldEncoding {
id: FieldID(4),
name: "typ".to_string(),
quant: Required,
typ: Type::Enum,
bounds: None
},
FieldEncoding {
id: FieldID(5),
name: "bounds".to_string(),
quant: Required,
typ: Type::UInt64,
bounds: None
},
],
opt_rep_fields: vec![]
},
RecordEncoding {
name: "RecordEncoding".to_string(),
req_fields: vec![
FieldEncoding {
id: FieldID(1),
name: "name".to_string(),
quant: Required,
typ: Type::String,
bounds: None
},
|
id: FieldID(2),
name: "req_fields".to_string(),
quant: Repeated,
typ: FIELD_ENCODING_TYP,
bounds: None
},
FieldEncoding {
id: FieldID(3),
name: "opt_rep_fields".to_string(),
quant: Repeated,
typ: FIELD_ENCODING_TYP,
bounds: None
},
],
},
],
};
}
}
|
],
opt_rep_fields: vec![
FieldEncoding {
|
random_line_split
|
range-type-infer.rs
|
// run-pass
#![allow(unused_must_use)]
// Make sure the type inference for the new range expression work as
// good as the old one. Check out issue #21672, #21595 and #21649 for
// more details.
fn
|
() {
let xs = (0..8).map(|i| i == 1u64).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs = (0..8).map(|i| 1u64 == i).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs: Vec<u8> = (0..10).collect();
assert_eq!(xs.len(), 10);
for x in 0..10 { x % 2; }
for x in 0..100 { x as f32; }
let array = [true, false];
for i in 0..1 { array[i]; }
}
|
main
|
identifier_name
|
range-type-infer.rs
|
// run-pass
#![allow(unused_must_use)]
// Make sure the type inference for the new range expression work as
|
// more details.
fn main() {
let xs = (0..8).map(|i| i == 1u64).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs = (0..8).map(|i| 1u64 == i).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs: Vec<u8> = (0..10).collect();
assert_eq!(xs.len(), 10);
for x in 0..10 { x % 2; }
for x in 0..100 { x as f32; }
let array = [true, false];
for i in 0..1 { array[i]; }
}
|
// good as the old one. Check out issue #21672, #21595 and #21649 for
|
random_line_split
|
range-type-infer.rs
|
// run-pass
#![allow(unused_must_use)]
// Make sure the type inference for the new range expression work as
// good as the old one. Check out issue #21672, #21595 and #21649 for
// more details.
fn main()
|
{
let xs = (0..8).map(|i| i == 1u64).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs = (0..8).map(|i| 1u64 == i).collect::<Vec<_>>();
assert_eq!(xs[1], true);
let xs: Vec<u8> = (0..10).collect();
assert_eq!(xs.len(), 10);
for x in 0..10 { x % 2; }
for x in 0..100 { x as f32; }
let array = [true, false];
for i in 0..1 { array[i]; }
}
|
identifier_body
|
|
regions-variance-contravariant-use-covariant.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is contravariant with respect to its region
// parameter yields an error when used in a covariant way.
//
// Note: see variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
// This is contravariant with respect to 'a, meaning that
// Contravariant<'long> <: Contravariant<'short> iff
// 'short <= 'long
struct Contravariant<'a> {
f: &'a int
}
fn use_<'short,'long>(c: Contravariant<'short>,
s: &'short int,
l: &'long int,
_where:Option<&'short &'long ()>) {
// Test whether Contravariant<'short> <: Contravariant<'long>. Since
// 'short <= 'long, this would be true if the Contravariant type were
// covariant with respect to its parameter 'a.
let _: Contravariant<'long> = c; //~ ERROR mismatched types
//~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
|
// http://rust-lang.org/COPYRIGHT.
|
random_line_split
|
regions-variance-contravariant-use-covariant.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that a type which is contravariant with respect to its region
// parameter yields an error when used in a covariant way.
//
// Note: see variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
// This is contravariant with respect to 'a, meaning that
// Contravariant<'long> <: Contravariant<'short> iff
// 'short <= 'long
struct
|
<'a> {
f: &'a int
}
fn use_<'short,'long>(c: Contravariant<'short>,
s: &'short int,
l: &'long int,
_where:Option<&'short &'long ()>) {
// Test whether Contravariant<'short> <: Contravariant<'long>. Since
// 'short <= 'long, this would be true if the Contravariant type were
// covariant with respect to its parameter 'a.
let _: Contravariant<'long> = c; //~ ERROR mismatched types
//~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
|
Contravariant
|
identifier_name
|
font_awesome.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::generated::css_classes::C;
use seed::{prelude::*, virtual_dom::Attrs, *};
pub fn font_awesome_outline<T>(more_attrs: Attrs, icon_name: &str) -> Node<T> {
font_awesome_base("regular", more_attrs, icon_name)
}
pub fn font_awesome<T>(more_attrs: Attrs, icon_name: &str) -> Node<T> {
font_awesome_base("solid", more_attrs, icon_name)
}
fn font_awesome_base<T>(sprite_sheet: &str, more_attrs: Attrs, icon_name: &str) -> Node<T> {
let mut attrs = class![C.fill_current];
attrs.merge(more_attrs);
svg![
attrs,
r#use![
class![C.pointer_events_none],
attrs! {
|
]
]
}
|
At::Href => format!("sprites/{}.svg#{}", sprite_sheet, icon_name),
}
|
random_line_split
|
font_awesome.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::generated::css_classes::C;
use seed::{prelude::*, virtual_dom::Attrs, *};
pub fn font_awesome_outline<T>(more_attrs: Attrs, icon_name: &str) -> Node<T> {
font_awesome_base("regular", more_attrs, icon_name)
}
pub fn font_awesome<T>(more_attrs: Attrs, icon_name: &str) -> Node<T> {
font_awesome_base("solid", more_attrs, icon_name)
}
fn
|
<T>(sprite_sheet: &str, more_attrs: Attrs, icon_name: &str) -> Node<T> {
let mut attrs = class![C.fill_current];
attrs.merge(more_attrs);
svg![
attrs,
r#use![
class![C.pointer_events_none],
attrs! {
At::Href => format!("sprites/{}.svg#{}", sprite_sheet, icon_name),
}
]
]
}
|
font_awesome_base
|
identifier_name
|
font_awesome.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::generated::css_classes::C;
use seed::{prelude::*, virtual_dom::Attrs, *};
pub fn font_awesome_outline<T>(more_attrs: Attrs, icon_name: &str) -> Node<T>
|
pub fn font_awesome<T>(more_attrs: Attrs, icon_name: &str) -> Node<T> {
font_awesome_base("solid", more_attrs, icon_name)
}
fn font_awesome_base<T>(sprite_sheet: &str, more_attrs: Attrs, icon_name: &str) -> Node<T> {
let mut attrs = class![C.fill_current];
attrs.merge(more_attrs);
svg![
attrs,
r#use![
class![C.pointer_events_none],
attrs! {
At::Href => format!("sprites/{}.svg#{}", sprite_sheet, icon_name),
}
]
]
}
|
{
font_awesome_base("regular", more_attrs, icon_name)
}
|
identifier_body
|
parser.rs
|
#![plugin(peg_syntax_ext)]
use std::fmt;
use std::collections::HashMap;
peg_file! gremlin("gremlin.rustpeg");
pub fn parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
let parsed = pre_parse(g);
// verify all the steps actually make sense
// is it a query to a single vertex or a global query?
parsed
}
pub fn pre_parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
gremlin::query(g)
}
/*
returned from the peg parser
we'll need to take each of the steps
and use them to construct an actual GraphQuery
*/
pub struct ParsedGraphQuery {
pub steps: Vec<RawStep>
}
/*
scope of the query. determines if we're looking at the
entire graph or just from a handful of vertices
|
}
/*
generic step used in ParsedGraphQuery
will be turned into specific steps
*/
#[derive(Debug)]
pub struct RawStep {
pub name: String,
pub args: Vec<Arg>,
}
#[derive(Debug, Display)]
pub enum Arg {
Integer(i64),
Float(f64),
String(String),
}
impl RawStep {
pub fn new(name: String, args: Vec<Arg>) -> RawStep {
RawStep{name:name, args:args}
}
}
impl fmt::Display for RawStep {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RawStep {}", self.name)
}
}
|
*/
pub enum Scope {
Global,
Vertex(Vec<i64>),
|
random_line_split
|
parser.rs
|
#![plugin(peg_syntax_ext)]
use std::fmt;
use std::collections::HashMap;
peg_file! gremlin("gremlin.rustpeg");
pub fn parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
let parsed = pre_parse(g);
// verify all the steps actually make sense
// is it a query to a single vertex or a global query?
parsed
}
pub fn pre_parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
gremlin::query(g)
}
/*
returned from the peg parser
we'll need to take each of the steps
and use them to construct an actual GraphQuery
*/
pub struct ParsedGraphQuery {
pub steps: Vec<RawStep>
}
/*
scope of the query. determines if we're looking at the
entire graph or just from a handful of vertices
*/
pub enum Scope {
Global,
Vertex(Vec<i64>),
}
/*
generic step used in ParsedGraphQuery
will be turned into specific steps
*/
#[derive(Debug)]
pub struct
|
{
pub name: String,
pub args: Vec<Arg>,
}
#[derive(Debug, Display)]
pub enum Arg {
Integer(i64),
Float(f64),
String(String),
}
impl RawStep {
pub fn new(name: String, args: Vec<Arg>) -> RawStep {
RawStep{name:name, args:args}
}
}
impl fmt::Display for RawStep {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RawStep {}", self.name)
}
}
|
RawStep
|
identifier_name
|
parser.rs
|
#![plugin(peg_syntax_ext)]
use std::fmt;
use std::collections::HashMap;
peg_file! gremlin("gremlin.rustpeg");
pub fn parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
let parsed = pre_parse(g);
// verify all the steps actually make sense
// is it a query to a single vertex or a global query?
parsed
}
pub fn pre_parse(g: &str) -> Result<ParsedGraphQuery, gremlin::ParseError> {
gremlin::query(g)
}
/*
returned from the peg parser
we'll need to take each of the steps
and use them to construct an actual GraphQuery
*/
pub struct ParsedGraphQuery {
pub steps: Vec<RawStep>
}
/*
scope of the query. determines if we're looking at the
entire graph or just from a handful of vertices
*/
pub enum Scope {
Global,
Vertex(Vec<i64>),
}
/*
generic step used in ParsedGraphQuery
will be turned into specific steps
*/
#[derive(Debug)]
pub struct RawStep {
pub name: String,
pub args: Vec<Arg>,
}
#[derive(Debug, Display)]
pub enum Arg {
Integer(i64),
Float(f64),
String(String),
}
impl RawStep {
pub fn new(name: String, args: Vec<Arg>) -> RawStep {
RawStep{name:name, args:args}
}
}
impl fmt::Display for RawStep {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
|
{
write!(f, "RawStep {}", self.name)
}
|
identifier_body
|
config.rs
|
use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr};
use std::str::FromStr;
use std::any::TypeId;
use std::mem::swap;
use std::time::Duration;
use anymap::Map;
use anymap::any::{Any, UncheckedAnyExt};
///HTTP or HTTPS.
pub enum Scheme {
///Standard HTTP.
Http,
///HTTP with SSL encryption.
#[cfg(feature = "ssl")]
Https {
///Path to SSL certificate.
cert: ::std::path::PathBuf,
///Path to key file.
key: ::std::path::PathBuf
}
}
///A host address and a port.
///
///Can be conveniently converted from an existing address-port pair or just a port:
///
///```
///use std::net::Ipv4Addr;
///use rustful::server::Host;
///
///let host1: Host = (Ipv4Addr::new(0, 0, 0, 0), 80).into();
///let host2: Host = 80.into();
///
///assert_eq!(host1, host2);
///```
#[derive(Eq, PartialEq, Debug, Hash, Clone, Copy)]
pub struct Host(SocketAddr);
impl Host {
///Create a `Host` with the address `0.0.0.0:port`. This is the same as `port.into()`.
pub fn any_v4(port: u16) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port)))
}
///Change the port of the host address.
pub fn port(&mut self, port: u16) {
self.0 = match self.0 {
SocketAddr::V4(addr) => SocketAddr::V4(SocketAddrV4::new(addr.ip().clone(), port)),
SocketAddr::V6(addr) => {
SocketAddr::V6(SocketAddrV6::new(addr.ip().clone(), port, addr.flowinfo(), addr.scope_id()))
}
};
}
}
impl From<Host> for SocketAddr {
fn from(host: Host) -> SocketAddr {
host.0
}
}
impl From<u16> for Host {
fn from(port: u16) -> Host {
Host::any_v4(port)
}
}
impl From<SocketAddr> for Host {
fn from(addr: SocketAddr) -> Host {
Host(addr)
}
}
impl From<SocketAddrV4> for Host {
fn from(addr: SocketAddrV4) -> Host {
Host(SocketAddr::V4(addr))
}
}
impl From<SocketAddrV6> for Host {
fn from(addr: SocketAddrV6) -> Host {
Host(SocketAddr::V6(addr))
}
}
impl From<(Ipv4Addr, u16)> for Host {
fn from((ip, port): (Ipv4Addr, u16)) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(ip, port)))
}
}
impl FromStr for Host {
type Err = <SocketAddr as FromStr>::Err;
fn from_str(s: &str) -> Result<Host, Self::Err> {
s.parse().map(|s| Host(s))
}
}
///A somewhat lazy container for globally accessible data.
///
///It will try to be as simple as possible and allocate as little as possible,
///depending on the number of stored values.
///
/// * No value: Nothing is allocated and nothing is searched for during
///access.
///
/// * One value: One `Box` is allocated. Searching for a value will only
///consist of a comparison of `TypeId` and a downcast.
///
/// * Multiple values: An `AnyMap` is created, as well as a `Box` for each
///value. Searching for a value has the full overhead of `AnyMap`.
///
///`Global` can be created from a boxed value, from tuples or using the
///`Default` trait. More values can then be added using `insert(value)`.
///
///```
///use rustful::server::Global;
///let mut g1: Global = Box::new(5).into();
///assert_eq!(g1.get(), Some(&5));
///assert_eq!(g1.get::<&str>(), None);
///
///let old = g1.insert(10);
///assert_eq!(old, Some(5));
///assert_eq!(g1.get(), Some(&10));
///
///g1.insert("cat");
///assert_eq!(g1.get(), Some(&10));
///assert_eq!(g1.get(), Some(&"cat"));
///
///let g2: Global = (5, "cat").into();
///assert_eq!(g2.get(), Some(&5));
///assert_eq!(g2.get(), Some(&"cat"));
///```
pub struct Global(GlobalState);
impl Global {
///Borrow a value of type `T` if there is one.
pub fn get<T: Any + Send + Sync>(&self) -> Option<&T> {
match self.0 {
GlobalState::None => None,
GlobalState::One(id, ref a) => if id == TypeId::of::<T>() {
//Here be dragons!
unsafe { Some(a.downcast_ref_unchecked()) }
} else {
None
},
GlobalState::Many(ref map) => map.get()
}
}
///Insert a new value, returning the previous value of the same type, if
///any.
pub fn insert<T: Any + Send + Sync>(&mut self, value: T) -> Option<T> {
match self.0 {
GlobalState::None => {
*self = Box::new(value).into();
None
},
GlobalState::One(id, _) => if id == TypeId::of::<T>() {
if let GlobalState::One(_, ref mut previous_value) = self.0 {
let mut v = Box::new(value) as Box<Any + Send + Sync>;
swap(previous_value, &mut v);
Some(unsafe { *v.downcast_unchecked() })
} else {
unreachable!()
}
} else {
//Here be more dragons!
let mut other = GlobalState::Many(Map::new());
swap(&mut self.0, &mut other);
if let GlobalState::Many(ref mut map) = self.0 {
if let GlobalState::One(id, previous_value) = other {
let mut raw = map.as_mut();
unsafe { raw.insert(id, previous_value); }
}
map.insert(value)
} else {
unreachable!()
}
},
GlobalState::Many(ref mut map) => {
map.insert(value)
}
}
}
}
impl<T: Any + Send + Sync> From<Box<T>> for Global {
fn from(data: Box<T>) -> Global {
Global(GlobalState::One(TypeId::of::<T>(), data))
}
}
macro_rules! from_tuple {
($first: ident, $($t: ident),+) => (
impl<$first: Any + Send + Sync, $($t: Any + Send + Sync),+> From<($first, $($t),+)> for Global {
#[allow(non_snake_case)]
fn from(tuple: ($first, $($t),+))-> Global {
let ($first, $($t),+) = tuple;
let mut map = Map::new();
map.insert($first);
$(
map.insert($t);
)+
Global(GlobalState::Many(map))
}
}
from_tuple!($($t),+);
);
($ty: ident) => (
impl<$ty: Any + Send + Sync> From<($ty,)> for Global {
fn from(tuple: ($ty,)) -> Global {
Box::new(tuple.0).into()
}
}
);
}
impl From<()> for Global {
fn from(_: ()) -> Global {
Global(GlobalState::None)
}
}
from_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
impl Default for Global {
fn default() -> Global {
Global(GlobalState::None)
}
}
enum GlobalState {
None,
One(TypeId, Box<Any + Send + Sync>),
Many(Map<Any + Send + Sync>),
|
///Settings for `keep-alive` connections to the server.
pub struct KeepAlive {
///How long a `keep-alive` connection may idle before it is forcibly closed.
pub timeout: Duration,
///The number of threads in the thread pool that should be kept free from
///idling threads. Connections will be closed if the number of idle
///threads goes below this.
pub free_threads: usize,
}
|
}
|
random_line_split
|
config.rs
|
use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr};
use std::str::FromStr;
use std::any::TypeId;
use std::mem::swap;
use std::time::Duration;
use anymap::Map;
use anymap::any::{Any, UncheckedAnyExt};
///HTTP or HTTPS.
pub enum Scheme {
///Standard HTTP.
Http,
///HTTP with SSL encryption.
#[cfg(feature = "ssl")]
Https {
///Path to SSL certificate.
cert: ::std::path::PathBuf,
///Path to key file.
key: ::std::path::PathBuf
}
}
///A host address and a port.
///
///Can be conveniently converted from an existing address-port pair or just a port:
///
///```
///use std::net::Ipv4Addr;
///use rustful::server::Host;
///
///let host1: Host = (Ipv4Addr::new(0, 0, 0, 0), 80).into();
///let host2: Host = 80.into();
///
///assert_eq!(host1, host2);
///```
#[derive(Eq, PartialEq, Debug, Hash, Clone, Copy)]
pub struct Host(SocketAddr);
impl Host {
///Create a `Host` with the address `0.0.0.0:port`. This is the same as `port.into()`.
pub fn any_v4(port: u16) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port)))
}
///Change the port of the host address.
pub fn port(&mut self, port: u16) {
self.0 = match self.0 {
SocketAddr::V4(addr) => SocketAddr::V4(SocketAddrV4::new(addr.ip().clone(), port)),
SocketAddr::V6(addr) => {
SocketAddr::V6(SocketAddrV6::new(addr.ip().clone(), port, addr.flowinfo(), addr.scope_id()))
}
};
}
}
impl From<Host> for SocketAddr {
fn from(host: Host) -> SocketAddr {
host.0
}
}
impl From<u16> for Host {
fn from(port: u16) -> Host {
Host::any_v4(port)
}
}
impl From<SocketAddr> for Host {
fn from(addr: SocketAddr) -> Host {
Host(addr)
}
}
impl From<SocketAddrV4> for Host {
fn from(addr: SocketAddrV4) -> Host {
Host(SocketAddr::V4(addr))
}
}
impl From<SocketAddrV6> for Host {
fn from(addr: SocketAddrV6) -> Host {
Host(SocketAddr::V6(addr))
}
}
impl From<(Ipv4Addr, u16)> for Host {
fn from((ip, port): (Ipv4Addr, u16)) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(ip, port)))
}
}
impl FromStr for Host {
type Err = <SocketAddr as FromStr>::Err;
fn from_str(s: &str) -> Result<Host, Self::Err> {
s.parse().map(|s| Host(s))
}
}
///A somewhat lazy container for globally accessible data.
///
///It will try to be as simple as possible and allocate as little as possible,
///depending on the number of stored values.
///
/// * No value: Nothing is allocated and nothing is searched for during
///access.
///
/// * One value: One `Box` is allocated. Searching for a value will only
///consist of a comparison of `TypeId` and a downcast.
///
/// * Multiple values: An `AnyMap` is created, as well as a `Box` for each
///value. Searching for a value has the full overhead of `AnyMap`.
///
///`Global` can be created from a boxed value, from tuples or using the
///`Default` trait. More values can then be added using `insert(value)`.
///
///```
///use rustful::server::Global;
///let mut g1: Global = Box::new(5).into();
///assert_eq!(g1.get(), Some(&5));
///assert_eq!(g1.get::<&str>(), None);
///
///let old = g1.insert(10);
///assert_eq!(old, Some(5));
///assert_eq!(g1.get(), Some(&10));
///
///g1.insert("cat");
///assert_eq!(g1.get(), Some(&10));
///assert_eq!(g1.get(), Some(&"cat"));
///
///let g2: Global = (5, "cat").into();
///assert_eq!(g2.get(), Some(&5));
///assert_eq!(g2.get(), Some(&"cat"));
///```
pub struct Global(GlobalState);
impl Global {
///Borrow a value of type `T` if there is one.
pub fn get<T: Any + Send + Sync>(&self) -> Option<&T> {
match self.0 {
GlobalState::None => None,
GlobalState::One(id, ref a) => if id == TypeId::of::<T>() {
//Here be dragons!
unsafe { Some(a.downcast_ref_unchecked()) }
} else {
None
},
GlobalState::Many(ref map) => map.get()
}
}
///Insert a new value, returning the previous value of the same type, if
///any.
pub fn insert<T: Any + Send + Sync>(&mut self, value: T) -> Option<T> {
match self.0 {
GlobalState::None => {
*self = Box::new(value).into();
None
},
GlobalState::One(id, _) => if id == TypeId::of::<T>() {
if let GlobalState::One(_, ref mut previous_value) = self.0 {
let mut v = Box::new(value) as Box<Any + Send + Sync>;
swap(previous_value, &mut v);
Some(unsafe { *v.downcast_unchecked() })
} else {
unreachable!()
}
} else {
//Here be more dragons!
let mut other = GlobalState::Many(Map::new());
swap(&mut self.0, &mut other);
if let GlobalState::Many(ref mut map) = self.0 {
if let GlobalState::One(id, previous_value) = other {
let mut raw = map.as_mut();
unsafe { raw.insert(id, previous_value); }
}
map.insert(value)
} else {
unreachable!()
}
},
GlobalState::Many(ref mut map) => {
map.insert(value)
}
}
}
}
impl<T: Any + Send + Sync> From<Box<T>> for Global {
fn from(data: Box<T>) -> Global {
Global(GlobalState::One(TypeId::of::<T>(), data))
}
}
macro_rules! from_tuple {
($first: ident, $($t: ident),+) => (
impl<$first: Any + Send + Sync, $($t: Any + Send + Sync),+> From<($first, $($t),+)> for Global {
#[allow(non_snake_case)]
fn from(tuple: ($first, $($t),+))-> Global {
let ($first, $($t),+) = tuple;
let mut map = Map::new();
map.insert($first);
$(
map.insert($t);
)+
Global(GlobalState::Many(map))
}
}
from_tuple!($($t),+);
);
($ty: ident) => (
impl<$ty: Any + Send + Sync> From<($ty,)> for Global {
fn from(tuple: ($ty,)) -> Global {
Box::new(tuple.0).into()
}
}
);
}
impl From<()> for Global {
fn from(_: ()) -> Global {
Global(GlobalState::None)
}
}
from_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
impl Default for Global {
fn
|
() -> Global {
Global(GlobalState::None)
}
}
enum GlobalState {
None,
One(TypeId, Box<Any + Send + Sync>),
Many(Map<Any + Send + Sync>),
}
///Settings for `keep-alive` connections to the server.
pub struct KeepAlive {
///How long a `keep-alive` connection may idle before it's forcibly closed.
pub timeout: Duration,
///The number of threads in the thread pool that should be kept free from
///idling threads. Connections will be closed if the number of idle
///threads goes below this.
pub free_threads: usize,
}
|
default
|
identifier_name
|
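A minimal usage sketch, added for illustration and not part of the config.rs source above: it constructs the `KeepAlive` settings exactly as the struct defines them, and the 30-second timeout and the count of 4 free threads are arbitrary example values.

use std::time::Duration;

fn example_keep_alive() -> KeepAlive {
    KeepAlive {
        // Force-close keep-alive connections after 30 seconds of idling.
        timeout: Duration::from_secs(30),
        // Keep at least 4 pool threads free of idling connections.
        free_threads: 4,
    }
}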
config.rs
|
use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr};
use std::str::FromStr;
use std::any::TypeId;
use std::mem::swap;
use std::time::Duration;
use anymap::Map;
use anymap::any::{Any, UncheckedAnyExt};
///HTTP or HTTPS.
pub enum Scheme {
///Standard HTTP.
Http,
///HTTP with SSL encryption.
#[cfg(feature = "ssl")]
Https {
///Path to SSL certificate.
cert: ::std::path::PathBuf,
///Path to key file.
key: ::std::path::PathBuf
}
}
///A host address and a port.
///
///Can be conveniently converted from an existing address-port pair or just a port:
///
///```
///use std::net::Ipv4Addr;
///use rustful::server::Host;
///
///let host1: Host = (Ipv4Addr::new(0, 0, 0, 0), 80).into();
///let host2: Host = 80.into();
///
///assert_eq!(host1, host2);
///```
#[derive(Eq, PartialEq, Debug, Hash, Clone, Copy)]
pub struct Host(SocketAddr);
impl Host {
///Create a `Host` with the address `0.0.0.0:port`. This is the same as `port.into()`.
pub fn any_v4(port: u16) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port)))
}
///Change the port of the host address.
pub fn port(&mut self, port: u16) {
self.0 = match self.0 {
SocketAddr::V4(addr) => SocketAddr::V4(SocketAddrV4::new(addr.ip().clone(), port)),
SocketAddr::V6(addr) => {
SocketAddr::V6(SocketAddrV6::new(addr.ip().clone(), port, addr.flowinfo(), addr.scope_id()))
}
};
}
}
impl From<Host> for SocketAddr {
fn from(host: Host) -> SocketAddr {
host.0
}
}
impl From<u16> for Host {
fn from(port: u16) -> Host {
Host::any_v4(port)
}
}
impl From<SocketAddr> for Host {
fn from(addr: SocketAddr) -> Host {
Host(addr)
}
}
impl From<SocketAddrV4> for Host {
fn from(addr: SocketAddrV4) -> Host {
Host(SocketAddr::V4(addr))
}
}
impl From<SocketAddrV6> for Host {
fn from(addr: SocketAddrV6) -> Host {
Host(SocketAddr::V6(addr))
}
}
impl From<(Ipv4Addr, u16)> for Host {
fn from((ip, port): (Ipv4Addr, u16)) -> Host {
Host(SocketAddr::V4(SocketAddrV4::new(ip, port)))
}
}
impl FromStr for Host {
type Err = <SocketAddr as FromStr>::Err;
fn from_str(s: &str) -> Result<Host, Self::Err> {
s.parse().map(|s| Host(s))
}
}
///A somewhat lazy container for globally accessible data.
///
///It will try to be as simple as possible and allocate as little as possible,
///depending on the number of stored values.
///
/// * No value: Nothing is allocated and nothing is searched for during
///access.
///
/// * One value: One `Box` is allocated. Searching for a value will only
///consist of a comparison of `TypeId` and a downcast.
///
/// * Multiple values: An `AnyMap` is created, as well as a `Box` for each
///value. Searching for a value has the full overhead of `AnyMap`.
///
///`Global` can be created from a boxed value, from tuples or using the
///`Default` trait. More values can then be added using `insert(value)`.
///
///```
///use rustful::server::Global;
///let mut g1: Global = Box::new(5).into();
///assert_eq!(g1.get(), Some(&5));
///assert_eq!(g1.get::<&str>(), None);
///
///let old = g1.insert(10);
///assert_eq!(old, Some(5));
///assert_eq!(g1.get(), Some(&10));
///
///g1.insert("cat");
///assert_eq!(g1.get(), Some(&10));
///assert_eq!(g1.get(), Some(&"cat"));
///
///let g2: Global = (5, "cat").into();
///assert_eq!(g2.get(), Some(&5));
///assert_eq!(g2.get(), Some(&"cat"));
///```
pub struct Global(GlobalState);
impl Global {
///Borrow a value of type `T` if there is one.
pub fn get<T: Any + Send + Sync>(&self) -> Option<&T> {
match self.0 {
GlobalState::None => None,
GlobalState::One(id, ref a) => if id == TypeId::of::<T>() {
//Here be dragons!
unsafe { Some(a.downcast_ref_unchecked()) }
} else {
None
},
GlobalState::Many(ref map) => map.get()
}
}
///Insert a new value, returning the previous value of the same type, if
///any.
pub fn insert<T: Any + Send + Sync>(&mut self, value: T) -> Option<T>
|
let mut raw = map.as_mut();
unsafe { raw.insert(id, previous_value); }
}
map.insert(value)
} else {
unreachable!()
}
},
GlobalState::Many(ref mut map) => {
map.insert(value)
}
}
}
}
impl<T: Any + Send + Sync> From<Box<T>> for Global {
fn from(data: Box<T>) -> Global {
Global(GlobalState::One(TypeId::of::<T>(), data))
}
}
macro_rules! from_tuple {
($first: ident, $($t: ident),+) => (
impl<$first: Any + Send + Sync, $($t: Any + Send + Sync),+> From<($first, $($t),+)> for Global {
#[allow(non_snake_case)]
fn from(tuple: ($first, $($t),+))-> Global {
let ($first, $($t),+) = tuple;
let mut map = Map::new();
map.insert($first);
$(
map.insert($t);
)+
Global(GlobalState::Many(map))
}
}
from_tuple!($($t),+);
);
($ty: ident) => (
impl<$ty: Any + Send + Sync> From<($ty,)> for Global {
fn from(tuple: ($ty,)) -> Global {
Box::new(tuple.0).into()
}
}
);
}
impl From<()> for Global {
fn from(_: ()) -> Global {
Global(GlobalState::None)
}
}
from_tuple!(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
impl Default for Global {
fn default() -> Global {
Global(GlobalState::None)
}
}
enum GlobalState {
None,
One(TypeId, Box<Any + Send + Sync>),
Many(Map<Any + Send + Sync>),
}
///Settings for `keep-alive` connections to the server.
pub struct KeepAlive {
///How long a `keep-alive` connection may idle before it's forcibly closed.
pub timeout: Duration,
///The number of threads in the thread pool that should be kept free from
///idling threads. Connections will be closed if the number of idle
///threads goes below this.
pub free_threads: usize,
}
|
{
match self.0 {
GlobalState::None => {
*self = Box::new(value).into();
None
},
GlobalState::One(id, _) => if id == TypeId::of::<T>() {
if let GlobalState::One(_, ref mut previous_value) = self.0 {
let mut v = Box::new(value) as Box<Any + Send + Sync>;
swap(previous_value, &mut v);
Some(unsafe { *v.downcast_unchecked() })
} else {
unreachable!()
}
} else {
//Here be more dragons!
let mut other = GlobalState::Many(Map::new());
swap(&mut self.0, &mut other);
if let GlobalState::Many(ref mut map) = self.0 {
if let GlobalState::One(id, previous_value) = other {
|
identifier_body
|
workqueue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use native;
use rand;
use rand::{Rng, XorShiftRng};
use std::cast;
use std::comm;
use std::mem;
use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use std::task::TaskOpts;
/// A unit of work.
///
/// The type parameter `QUD` stands for "queue user data" and represents global custom data for the
/// entire work queue, and the type parameter `WUD` stands for "work user data" and represents
/// custom data specific to each unit of work.
pub struct WorkUnit<QUD,WUD> {
/// The function to execute.
fun: extern "Rust" fn(WUD, &mut WorkerProxy<QUD,WUD>),
/// Arbitrary data.
data: WUD,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QUD,WUD> {
/// Tells the worker to start work.
StartMsg(Worker<WorkUnit<QUD,WUD>>, *mut AtomicUint, *QUD),
/// Tells the worker to stop. It can be restarted again with a `StartMsg`.
StopMsg,
/// Tells the worker thread to terminate.
ExitMsg,
}
/// Messages to the supervisor.
enum SupervisorMsg<QUD,WUD> {
FinishedMsg,
ReturnDequeMsg(uint, Worker<WorkUnit<QUD,WUD>>),
}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QUD,WUD> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QUD,WUD>>,
/// The buffer pool for this deque.
pool: BufferPool<WorkUnit<QUD,WUD>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QUD,WUD>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QUD,WUD>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QUD,WUD> {
/// The index of this worker.
index: uint,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QUD,WUD>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QUD,WUD>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: ~[Stealer<WorkUnit<QUD,WUD>>],
/// The random number generator for this worker.
rng: XorShiftRng,
}
static SPIN_COUNT: uint = 1000;
impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
/// The main logic. This function starts up the worker and listens for
/// messages.
pub fn start(&mut self) {
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv() {
StartMsg(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
StopMsg => fail!("unexpected stop message"),
ExitMsg => return,
};
// We're off!
//
// FIXME(pcwalton): Can't use labeled break or continue cross-crate due to a Rust bug.
loop {
// FIXME(pcwalton): Nasty workaround for the lack of labeled break/continue
// cross-crate.
let mut work_unit = unsafe {
mem::uninit()
};
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
let mut should_continue = true;
loop {
let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
match self.other_deques[victim].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
break
}
}
if i == SPIN_COUNT {
match self.port.try_recv() {
comm::Data(StopMsg) => {
should_continue = false;
break
}
comm::Data(ExitMsg) => return,
comm::Data(_) => fail!("unexpected message"),
_ => {}
}
i = 0
} else {
i += 1
}
}
if !should_continue {
break
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
queue_data: queue_data,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, SeqCst) == 1 {
self.chan.send(FinishedMsg)
}
}
}
// Give the deque back to the supervisor.
self.chan.send(ReturnDequeMsg(self.index, deque))
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a,QUD,WUD> {
priv worker: &'a mut Worker<WorkUnit<QUD,WUD>>,
priv ref_count: *mut AtomicUint,
priv queue_data: *QUD,
}
impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
unsafe {
drop((*self.ref_count).fetch_add(1, SeqCst));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data<'a>(&'a self) -> &'a QUD {
unsafe {
cast::transmute(self.queue_data)
}
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QUD,WUD> {
/// Information about each of the workers.
priv workers: ~[WorkerInfo<QUD,WUD>],
/// A port on which deques can be received from the workers.
priv port: Receiver<SupervisorMsg<QUD,WUD>>,
/// The amount of work that has been enqueued.
priv work_count: uint,
/// Arbitrary user data.
data: QUD,
}
impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (~[], ~[]);
for i in range(0, thread_count) {
let (worker_chan, worker_port) = channel();
let mut pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
pool: pool,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: ~[],
rng: rand::weak_rng(),
});
}
// Connect workers to one another.
for i in range(0, thread_count) {
for j in range(0, thread_count) {
if i != j {
threads[i].other_deques.push(infos[j].thief.clone())
}
}
assert!(threads[i].other_deques.len() == thread_count - 1)
}
// Spawn threads.
for thread in threads.move_iter() {
let mut opts = TaskOpts::new();
opts.name = Some(task_name.into_maybe_owned());
native::task::spawn_opts(opts, proc() {
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
data: user_data,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
match self.workers[0].deque {
None => {
fail!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn
|
(&mut self) {
// Tell the workers to start.
let mut work_count = AtomicUint::new(self.work_count);
for worker in self.workers.mut_iter() {
worker.chan.send(StartMsg(worker.deque.take_unwrap(), &mut work_count, &self.data))
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in self.workers.iter() {
worker.chan.send(StopMsg)
}
// Get our deques back.
for _ in range(0, self.workers.len()) {
match self.port.recv() {
ReturnDequeMsg(index, deque) => self.workers[index].deque = Some(deque),
FinishedMsg => fail!("unexpected finished message!"),
}
}
}
pub fn shutdown(&mut self) {
for worker in self.workers.iter() {
worker.chan.send(ExitMsg)
}
}
}
|
run
|
identifier_name
|
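A hypothetical usage sketch, not taken from workqueue.rs, showing how the `WorkQueue` API above fits together in the same pre-1.0 Rust dialect; the work function, queue name, thread count and payload are invented for illustration.

fn double(data: uint, _proxy: &mut WorkerProxy<(), uint>) {
    // A real work unit would do its computation here and could push more
    // work through the proxy; this one just reads its payload.
    let _ = data * 2;
}

fn example() {
    // Queue user data (QUD) is `()`, work user data (WUD) is `uint`.
    let mut queue: WorkQueue<(), uint> = WorkQueue::new("example", 4, ());
    queue.push(WorkUnit { fun: double, data: 21 });
    // Blocks until every enqueued unit has been executed by the workers.
    queue.run();
    queue.shutdown();
}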
workqueue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use native;
use rand;
use rand::{Rng, XorShiftRng};
use std::cast;
use std::comm;
use std::mem;
use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use std::task::TaskOpts;
/// A unit of work.
///
/// The type parameter `QUD` stands for "queue user data" and represents global custom data for the
/// entire work queue, and the type parameter `WUD` stands for "work user data" and represents
/// custom data specific to each unit of work.
pub struct WorkUnit<QUD,WUD> {
/// The function to execute.
fun: extern "Rust" fn(WUD, &mut WorkerProxy<QUD,WUD>),
/// Arbitrary data.
data: WUD,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QUD,WUD> {
/// Tells the worker to start work.
StartMsg(Worker<WorkUnit<QUD,WUD>>, *mut AtomicUint, *QUD),
/// Tells the worker to stop. It can be restarted again with a `StartMsg`.
StopMsg,
/// Tells the worker thread to terminate.
ExitMsg,
}
/// Messages to the supervisor.
enum SupervisorMsg<QUD,WUD> {
FinishedMsg,
ReturnDequeMsg(uint, Worker<WorkUnit<QUD,WUD>>),
}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QUD,WUD> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QUD,WUD>>,
/// The buffer pool for this deque.
pool: BufferPool<WorkUnit<QUD,WUD>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QUD,WUD>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QUD,WUD>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QUD,WUD> {
/// The index of this worker.
index: uint,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QUD,WUD>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QUD,WUD>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: ~[Stealer<WorkUnit<QUD,WUD>>],
/// The random number generator for this worker.
rng: XorShiftRng,
}
static SPIN_COUNT: uint = 1000;
impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
/// The main logic. This function starts up the worker and listens for
/// messages.
pub fn start(&mut self) {
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv() {
StartMsg(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
StopMsg => fail!("unexpected stop message"),
|
//
// FIXME(pcwalton): Can't use labeled break or continue cross-crate due to a Rust bug.
loop {
// FIXME(pcwalton): Nasty workaround for the lack of labeled break/continue
// cross-crate.
let mut work_unit = unsafe {
mem::uninit()
};
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
let mut should_continue = true;
loop {
let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
match self.other_deques[victim].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
break
}
}
if i == SPIN_COUNT {
match self.port.try_recv() {
comm::Data(StopMsg) => {
should_continue = false;
break
}
comm::Data(ExitMsg) => return,
comm::Data(_) => fail!("unexpected message"),
_ => {}
}
i = 0
} else {
i += 1
}
}
if !should_continue {
break
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
queue_data: queue_data,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, SeqCst) == 1 {
self.chan.send(FinishedMsg)
}
}
}
// Give the deque back to the supervisor.
self.chan.send(ReturnDequeMsg(self.index, deque))
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a,QUD,WUD> {
priv worker: &'a mut Worker<WorkUnit<QUD,WUD>>,
priv ref_count: *mut AtomicUint,
priv queue_data: *QUD,
}
impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
unsafe {
drop((*self.ref_count).fetch_add(1, SeqCst));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data<'a>(&'a self) -> &'a QUD {
unsafe {
cast::transmute(self.queue_data)
}
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QUD,WUD> {
/// Information about each of the workers.
priv workers: ~[WorkerInfo<QUD,WUD>],
/// A port on which deques can be received from the workers.
priv port: Receiver<SupervisorMsg<QUD,WUD>>,
/// The amount of work that has been enqueued.
priv work_count: uint,
/// Arbitrary user data.
data: QUD,
}
impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (~[], ~[]);
for i in range(0, thread_count) {
let (worker_chan, worker_port) = channel();
let mut pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
pool: pool,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: ~[],
rng: rand::weak_rng(),
});
}
// Connect workers to one another.
for i in range(0, thread_count) {
for j in range(0, thread_count) {
if i != j {
threads[i].other_deques.push(infos[j].thief.clone())
}
}
assert!(threads[i].other_deques.len() == thread_count - 1)
}
// Spawn threads.
for thread in threads.move_iter() {
let mut opts = TaskOpts::new();
opts.name = Some(task_name.into_maybe_owned());
native::task::spawn_opts(opts, proc() {
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
data: user_data,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
match self.workers[0].deque {
None => {
fail!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self) {
// Tell the workers to start.
let mut work_count = AtomicUint::new(self.work_count);
for worker in self.workers.mut_iter() {
worker.chan.send(StartMsg(worker.deque.take_unwrap(), &mut work_count, &self.data))
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in self.workers.iter() {
worker.chan.send(StopMsg)
}
// Get our deques back.
for _ in range(0, self.workers.len()) {
match self.port.recv() {
ReturnDequeMsg(index, deque) => self.workers[index].deque = Some(deque),
FinishedMsg => fail!("unexpected finished message!"),
}
}
}
pub fn shutdown(&mut self) {
for worker in self.workers.iter() {
worker.chan.send(ExitMsg)
}
}
}
|
ExitMsg => return,
};
// We're off!
|
random_line_split
|
workqueue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use native;
use rand;
use rand::{Rng, XorShiftRng};
use std::cast;
use std::comm;
use std::mem;
use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use std::task::TaskOpts;
/// A unit of work.
///
/// The type parameter `QUD` stands for "queue user data" and represents global custom data for the
/// entire work queue, and the type parameter `WUD` stands for "work user data" and represents
/// custom data specific to each unit of work.
pub struct WorkUnit<QUD,WUD> {
/// The function to execute.
fun: extern "Rust" fn(WUD, &mut WorkerProxy<QUD,WUD>),
/// Arbitrary data.
data: WUD,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QUD,WUD> {
/// Tells the worker to start work.
StartMsg(Worker<WorkUnit<QUD,WUD>>, *mut AtomicUint, *QUD),
/// Tells the worker to stop. It can be restarted again with a `StartMsg`.
StopMsg,
/// Tells the worker thread to terminate.
ExitMsg,
}
/// Messages to the supervisor.
enum SupervisorMsg<QUD,WUD> {
FinishedMsg,
ReturnDequeMsg(uint, Worker<WorkUnit<QUD,WUD>>),
}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QUD,WUD> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QUD,WUD>>,
/// The buffer pool for this deque.
pool: BufferPool<WorkUnit<QUD,WUD>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QUD,WUD>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QUD,WUD>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QUD,WUD> {
/// The index of this worker.
index: uint,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QUD,WUD>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QUD,WUD>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: ~[Stealer<WorkUnit<QUD,WUD>>],
/// The random number generator for this worker.
rng: XorShiftRng,
}
static SPIN_COUNT: uint = 1000;
impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
/// The main logic. This function starts up the worker and listens for
/// messages.
pub fn start(&mut self) {
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv() {
StartMsg(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
StopMsg => fail!("unexpected stop message"),
ExitMsg => return,
};
// We're off!
//
// FIXME(pcwalton): Can't use labeled break or continue cross-crate due to a Rust bug.
loop {
// FIXME(pcwalton): Nasty workaround for the lack of labeled break/continue
// cross-crate.
let mut work_unit = unsafe {
mem::uninit()
};
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
let mut should_continue = true;
loop {
let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
match self.other_deques[victim].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
break
}
}
if i == SPIN_COUNT {
match self.port.try_recv() {
comm::Data(StopMsg) => {
should_continue = false;
break
}
comm::Data(ExitMsg) => return,
comm::Data(_) => fail!("unexpected message"),
_ => {}
}
i = 0
} else {
i += 1
}
}
if !should_continue {
break
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
queue_data: queue_data,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, SeqCst) == 1 {
self.chan.send(FinishedMsg)
}
}
}
// Give the deque back to the supervisor.
self.chan.send(ReturnDequeMsg(self.index, deque))
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a,QUD,WUD> {
priv worker: &'a mut Worker<WorkUnit<QUD,WUD>>,
priv ref_count: *mut AtomicUint,
priv queue_data: *QUD,
}
impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
unsafe {
drop((*self.ref_count).fetch_add(1, SeqCst));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data<'a>(&'a self) -> &'a QUD {
unsafe {
cast::transmute(self.queue_data)
}
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QUD,WUD> {
/// Information about each of the workers.
priv workers: ~[WorkerInfo<QUD,WUD>],
/// A port on which deques can be received from the workers.
priv port: Receiver<SupervisorMsg<QUD,WUD>>,
/// The amount of work that has been enqueued.
priv work_count: uint,
/// Arbitrary user data.
data: QUD,
}
impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (~[], ~[]);
for i in range(0, thread_count) {
let (worker_chan, worker_port) = channel();
let mut pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
pool: pool,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: ~[],
rng: rand::weak_rng(),
});
}
// Connect workers to one another.
for i in range(0, thread_count) {
for j in range(0, thread_count) {
if i != j {
threads[i].other_deques.push(infos[j].thief.clone())
}
}
assert!(threads[i].other_deques.len() == thread_count - 1)
}
// Spawn threads.
for thread in threads.move_iter() {
let mut opts = TaskOpts::new();
opts.name = Some(task_name.into_maybe_owned());
native::task::spawn_opts(opts, proc() {
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
data: user_data,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>)
|
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self) {
// Tell the workers to start.
let mut work_count = AtomicUint::new(self.work_count);
for worker in self.workers.mut_iter() {
worker.chan.send(StartMsg(worker.deque.take_unwrap(), &mut work_count, &self.data))
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in self.workers.iter() {
worker.chan.send(StopMsg)
}
// Get our deques back.
for _ in range(0, self.workers.len()) {
match self.port.recv() {
ReturnDequeMsg(index, deque) => self.workers[index].deque = Some(deque),
FinishedMsg => fail!("unexpected finished message!"),
}
}
}
pub fn shutdown(&mut self) {
for worker in self.workers.iter() {
worker.chan.send(ExitMsg)
}
}
}
|
{
match self.workers[0].deque {
None => {
fail!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
|
identifier_body
|
workqueue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use native;
use rand;
use rand::{Rng, XorShiftRng};
use std::cast;
use std::comm;
use std::mem;
use std::sync::atomics::{AtomicUint, SeqCst};
use std::sync::deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use std::task::TaskOpts;
/// A unit of work.
///
/// The type parameter `QUD` stands for "queue user data" and represents global custom data for the
/// entire work queue, and the type parameter `WUD` stands for "work user data" and represents
/// custom data specific to each unit of work.
pub struct WorkUnit<QUD,WUD> {
/// The function to execute.
fun: extern "Rust" fn(WUD, &mut WorkerProxy<QUD,WUD>),
/// Arbitrary data.
data: WUD,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QUD,WUD> {
/// Tells the worker to start work.
StartMsg(Worker<WorkUnit<QUD,WUD>>, *mut AtomicUint, *QUD),
/// Tells the worker to stop. It can be restarted again with a `StartMsg`.
StopMsg,
/// Tells the worker thread to terminate.
ExitMsg,
}
/// Messages to the supervisor.
enum SupervisorMsg<QUD,WUD> {
FinishedMsg,
ReturnDequeMsg(uint, Worker<WorkUnit<QUD,WUD>>),
}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QUD,WUD> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QUD,WUD>>,
/// The buffer pool for this deque.
pool: BufferPool<WorkUnit<QUD,WUD>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QUD,WUD>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QUD,WUD>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QUD,WUD> {
/// The index of this worker.
index: uint,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QUD,WUD>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QUD,WUD>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: ~[Stealer<WorkUnit<QUD,WUD>>],
/// The random number generator for this worker.
rng: XorShiftRng,
}
static SPIN_COUNT: uint = 1000;
impl<QUD:Send,WUD:Send> WorkerThread<QUD,WUD> {
/// The main logic. This function starts up the worker and listens for
/// messages.
pub fn start(&mut self) {
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv() {
StartMsg(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
StopMsg => fail!("unexpected stop message"),
ExitMsg => return,
};
// We're off!
//
// FIXME(pcwalton): Can't use labeled break or continue cross-crate due to a Rust bug.
loop {
// FIXME(pcwalton): Nasty workaround for the lack of labeled break/continue
// cross-crate.
let mut work_unit = unsafe {
mem::uninit()
};
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
let mut should_continue = true;
loop {
let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
match self.other_deques[victim].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
break
}
}
if i == SPIN_COUNT {
match self.port.try_recv() {
comm::Data(StopMsg) => {
should_continue = false;
break
}
comm::Data(ExitMsg) => return,
comm::Data(_) => fail!("unexpected message"),
_ => {}
}
i = 0
} else {
i += 1
}
}
if !should_continue {
break
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
queue_data: queue_data,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, SeqCst) == 1
|
}
}
// Give the deque back to the supervisor.
self.chan.send(ReturnDequeMsg(self.index, deque))
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a,QUD,WUD> {
priv worker: &'a mut Worker<WorkUnit<QUD,WUD>>,
priv ref_count: *mut AtomicUint,
priv queue_data: *QUD,
}
impl<'a,QUD,WUD:Send> WorkerProxy<'a,QUD,WUD> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
unsafe {
drop((*self.ref_count).fetch_add(1, SeqCst));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data<'a>(&'a self) -> &'a QUD {
unsafe {
cast::transmute(self.queue_data)
}
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QUD,WUD> {
/// Information about each of the workers.
priv workers: ~[WorkerInfo<QUD,WUD>],
/// A port on which deques can be received from the workers.
priv port: Receiver<SupervisorMsg<QUD,WUD>>,
/// The amount of work that has been enqueued.
priv work_count: uint,
/// Arbitrary user data.
data: QUD,
}
impl<QUD:Send,WUD:Send> WorkQueue<QUD,WUD> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str, thread_count: uint, user_data: QUD) -> WorkQueue<QUD,WUD> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (~[], ~[]);
for i in range(0, thread_count) {
let (worker_chan, worker_port) = channel();
let mut pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
pool: pool,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: ~[],
rng: rand::weak_rng(),
});
}
// Connect workers to one another.
for i in range(0, thread_count) {
for j in range(0, thread_count) {
if i != j {
threads[i].other_deques.push(infos[j].thief.clone())
}
}
assert!(threads[i].other_deques.len() == thread_count - 1)
}
// Spawn threads.
for thread in threads.move_iter() {
let mut opts = TaskOpts::new();
opts.name = Some(task_name.into_maybe_owned());
native::task::spawn_opts(opts, proc() {
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
data: user_data,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QUD,WUD>) {
match self.workers[0].deque {
None => {
fail!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self) {
// Tell the workers to start.
let mut work_count = AtomicUint::new(self.work_count);
for worker in self.workers.mut_iter() {
worker.chan.send(StartMsg(worker.deque.take_unwrap(), &mut work_count, &self.data))
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in self.workers.iter() {
worker.chan.send(StopMsg)
}
// Get our deques back.
for _ in range(0, self.workers.len()) {
match self.port.recv() {
ReturnDequeMsg(index, deque) => self.workers[index].deque = Some(deque),
FinishedMsg => fail!("unexpected finished message!"),
}
}
}
pub fn shutdown(&mut self) {
for worker in self.workers.iter() {
worker.chan.send(ExitMsg)
}
}
}
|
{
self.chan.send(FinishedMsg)
}
|
conditional_block
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(alloc)]
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(plugin)]
#![feature(rustc_private)]
#![feature(std_misc)]
#![feature(str_char)]
#![plugin(plugins)]
#[macro_use]
extern crate log;
extern crate azure;
#[macro_use] extern crate bitflags;
extern crate collections;
extern crate geom;
extern crate layers;
extern crate libc;
extern crate stb_image;
extern crate png;
#[macro_use]
extern crate profile_traits;
extern crate script_traits;
extern crate rustc_serialize;
extern crate net_traits;
#[macro_use]
extern crate util;
extern crate msg;
extern crate rand;
extern crate string_cache;
extern crate style;
extern crate skia;
extern crate time;
extern crate url;
extern crate gfx_traits;
extern crate canvas_traits;
// Eventually we would like the shaper to be pluggable, as many operating systems have their own
// shapers. For now, however, this is a hard dependency.
extern crate harfbuzz;
// Linux and Android-specific library dependencies
#[cfg(any(target_os="linux", target_os = "android"))]
extern crate fontconfig;
#[cfg(any(target_os="linux", target_os = "android"))]
extern crate freetype;
// Mac OS-specific library dependencies
#[cfg(target_os="macos")] extern crate core_foundation;
#[cfg(target_os="macos")] extern crate core_graphics;
#[cfg(target_os="macos")] extern crate core_text;
pub use paint_context::PaintContext;
// Private painting modules
mod paint_context;
|
pub mod font;
pub mod font_context;
pub mod font_cache_task;
pub mod font_template;
// Misc.
mod buffer_map;
mod filters;
// Platform-specific implementations.
#[path="platform/mod.rs"]
pub mod platform;
// Text
#[path = "text/mod.rs"]
pub mod text;
|
#[path="display_list/mod.rs"]
pub mod display_list;
pub mod paint_task;
// Fonts
|
random_line_split
|
TestNativeCosh.rs
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
|
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
float __attribute__((kernel)) testNativeCoshFloatFloat(float in) {
return native_cosh(in);
}
float2 __attribute__((kernel)) testNativeCoshFloat2Float2(float2 in) {
return native_cosh(in);
}
float3 __attribute__((kernel)) testNativeCoshFloat3Float3(float3 in) {
return native_cosh(in);
}
float4 __attribute__((kernel)) testNativeCoshFloat4Float4(float4 in) {
return native_cosh(in);
}
|
random_line_split
|
|
union-c-interop.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// ignore-wasm32-bare no libc to test ffi with
#[derive(Clone, Copy)]
#[repr(C)]
struct
|
{
LowPart: u32,
HighPart: u32,
}
#[derive(Clone, Copy)]
#[repr(C)]
union LARGE_INTEGER {
__unnamed__: LARGE_INTEGER_U,
u: LARGE_INTEGER_U,
QuadPart: u64,
}
#[link(name = "rust_test_helpers", kind = "static")]
extern "C" {
fn increment_all_parts(_: LARGE_INTEGER) -> LARGE_INTEGER;
}
fn main() {
unsafe {
let mut li = LARGE_INTEGER { QuadPart: 0 };
let li_c = increment_all_parts(li);
li.__unnamed__.LowPart += 1;
li.__unnamed__.HighPart += 1;
li.u.LowPart += 1;
li.u.HighPart += 1;
li.QuadPart += 1;
assert_eq!(li.QuadPart, li_c.QuadPart);
}
}
|
LARGE_INTEGER_U
|
identifier_name
|
union-c-interop.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// ignore-wasm32-bare no libc to test ffi with
#[derive(Clone, Copy)]
#[repr(C)]
struct LARGE_INTEGER_U {
LowPart: u32,
HighPart: u32,
}
#[derive(Clone, Copy)]
#[repr(C)]
union LARGE_INTEGER {
__unnamed__: LARGE_INTEGER_U,
u: LARGE_INTEGER_U,
QuadPart: u64,
}
#[link(name = "rust_test_helpers", kind = "static")]
extern "C" {
fn increment_all_parts(_: LARGE_INTEGER) -> LARGE_INTEGER;
}
fn main()
|
{
unsafe {
let mut li = LARGE_INTEGER { QuadPart: 0 };
let li_c = increment_all_parts(li);
li.__unnamed__.LowPart += 1;
li.__unnamed__.HighPart += 1;
li.u.LowPart += 1;
li.u.HighPart += 1;
li.QuadPart += 1;
assert_eq!(li.QuadPart, li_c.QuadPart);
}
}
|
identifier_body
|
|
union-c-interop.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// ignore-wasm32-bare no libc to test ffi with
#[derive(Clone, Copy)]
#[repr(C)]
struct LARGE_INTEGER_U {
LowPart: u32,
HighPart: u32,
}
#[derive(Clone, Copy)]
#[repr(C)]
union LARGE_INTEGER {
__unnamed__: LARGE_INTEGER_U,
u: LARGE_INTEGER_U,
QuadPart: u64,
}
#[link(name = "rust_test_helpers", kind = "static")]
extern "C" {
fn increment_all_parts(_: LARGE_INTEGER) -> LARGE_INTEGER;
}
fn main() {
unsafe {
let mut li = LARGE_INTEGER { QuadPart: 0 };
let li_c = increment_all_parts(li);
li.__unnamed__.LowPart += 1;
li.__unnamed__.HighPart += 1;
li.u.LowPart += 1;
li.u.HighPart += 1;
li.QuadPart += 1;
assert_eq!(li.QuadPart, li_c.QuadPart);
}
}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
random_line_split
|
kbo.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use std::collections::hash_map::HashMap;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
/// Checks if s is greater than t according to the ordering.
pub fn kbo_gt(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if s.is_function() && t.is_function() {
let s_weight = weight.weight(only_unary_func, s);
let t_weight = weight.weight(only_unary_func, t);
if s_weight > t_weight {
variable_domination(s, t)
} else if s_weight == t_weight {
if kbo_precedence(precedence, only_unary_func, s, t) ||
(s.get_id() == t.get_id() &&
lexical_ordering(precedence, weight, only_unary_func, s, t)) {
variable_domination(s, t)
} else {
false
}
} else {
false
}
} else if s.is_function() && t.is_variable() {
s.occurs_proper(t)
} else {
false
}
}
/// Checks if s is greater than or equal to t according to the ordering.
pub fn kbo_ge(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
s == t || kbo_gt(precedence, weight, only_unary_func, s, t)
}
/// Checks if, for every variable x, the number of occurrences of x in s is greater than or equal to the number in t.
fn variable_domination(s: &Term, t: &Term) -> bool {
let mut variable_counts = HashMap::new();
variable_count(&mut variable_counts, s, 1);
variable_count(&mut variable_counts, t, -1);
variable_counts.values().all(|&count| count >= 0)
}
fn variable_count(counts: &mut HashMap<i64, i64>, t: &Term, weight: i64) {
if t.is_variable() {
let v = counts.entry(t.get_id()).or_insert(0);
*v += weight;
} else {
for x in t.iter() {
variable_count(counts, x, weight);
}
}
}
fn lexical_ordering(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
assert_eq!(s.get_id(), t.get_id());
assert_eq!(s.get_arity(), t.get_arity());
for i in 0..s.get_arity() {
if kbo_gt(precedence, weight, only_unary_func, &s[i], &t[i]) {
return true;
} else if s[i] != t[i] {
return false;
}
}
false
}
/// Expands the precedence so that it is suitable for KBO.
/// If there is exactly one unary function in the problem, it is greater than all other functions.
fn kbo_precedence(precedence: &Precedence,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if let Some(id) = *only_unary_func {
if s.get_id() == id && t.get_id() != id {
return true;
} else if s.get_id() != id && t.get_id() == id {
return false;
} else if s.get_id() == id && t.get_id() == id {
return false;
}
}
precedence.gt(s, t)
}
#[cfg(test)]
mod test {
use super::kbo_gt;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
#[test]
fn kbo_gt_1() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let y = Term::new_variable(-2);
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &y));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &y, &x));
}
#[test]
fn kbo_gt_2() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
|
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_x));
}
#[test]
fn kbo_gt_3() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = Some(1);
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
let f_f_f_x = Term::new_function(1, vec![f_f_x]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_f_x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_f_x));
}
}
|
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &f_x));
|
random_line_split
|
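A short worked example, added for clarity and not present in kbo.rs, tracing the comparison from the `kbo_gt_2` test above; it assumes `Weight::SimpleWeight` assigns weight 1 to every function symbol and variable, which is the usual KBO convention.

// With s = f(f(x)) and t = f(x): weight(s) = 3 and weight(t) = 2, so the
// first branch of kbo_gt applies and only variable_domination remains.
// The variable x occurs once in s (+1) and once in t (-1), every count ends
// up >= 0, so kbo_gt(s, t) holds; kbo_gt(t, s) already fails the weight check.
//
// Restated with the constructors used in the tests above (hypothetical):
// let x = Term::new_variable(-1);
// let f_x = Term::new_function(1, vec![x.clone()]);
// let f_f_x = Term::new_function(1, vec![f_x.clone()]);
// assert!(kbo_gt(&Precedence::default(), &Weight::SimpleWeight, &None, &f_f_x, &f_x));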
kbo.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use std::collections::hash_map::HashMap;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
/// Checks if s is greater than t according to the ordering.
pub fn kbo_gt(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if s.is_function() && t.is_function() {
let s_weight = weight.weight(only_unary_func, s);
let t_weight = weight.weight(only_unary_func, t);
if s_weight > t_weight {
variable_domination(s, t)
} else if s_weight == t_weight {
if kbo_precedence(precedence, only_unary_func, s, t) ||
(s.get_id() == t.get_id() &&
lexical_ordering(precedence, weight, only_unary_func, s, t)) {
variable_domination(s, t)
} else {
false
}
} else {
false
}
} else if s.is_function() && t.is_variable() {
s.occurs_proper(t)
} else {
false
}
}
/// Checks if s is greater than or equal to t according to the ordering.
pub fn kbo_ge(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
s == t || kbo_gt(precedence, weight, only_unary_func, s, t)
}
/// Checks if, for every variable x, the number of occurrences of x in s is greater than or equal to the number in t.
fn variable_domination(s: &Term, t: &Term) -> bool {
let mut variable_counts = HashMap::new();
variable_count(&mut variable_counts, s, 1);
variable_count(&mut variable_counts, t, -1);
variable_counts.values().all(|&count| count >= 0)
}
fn variable_count(counts: &mut HashMap<i64, i64>, t: &Term, weight: i64) {
if t.is_variable() {
let v = counts.entry(t.get_id()).or_insert(0);
*v += weight;
} else {
for x in t.iter() {
variable_count(counts, x, weight);
}
}
}
fn lexical_ordering(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
assert_eq!(s.get_id(), t.get_id());
assert_eq!(s.get_arity(), t.get_arity());
for i in 0..s.get_arity() {
if kbo_gt(precedence, weight, only_unary_func, &s[i], &t[i]) {
return true;
} else if s[i] != t[i] {
return false;
}
}
false
}
/// Expands the precedence so that it is suitable for KBO.
/// If there is exactly one unary function in the problem, it is greater than all other functions.
fn kbo_precedence(precedence: &Precedence,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if let Some(id) = *only_unary_func {
if s.get_id() == id && t.get_id() != id {
return true;
} else if s.get_id() != id && t.get_id() == id {
return false;
} else if s.get_id() == id && t.get_id() == id {
return false;
}
}
precedence.gt(s, t)
}
#[cfg(test)]
mod test {
use super::kbo_gt;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
#[test]
fn kbo_gt_1() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let y = Term::new_variable(-2);
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &y));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &y, &x));
}
#[test]
fn kbo_gt_2() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &f_x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_x));
}
#[test]
fn kbo_gt_3()
|
}
|
{
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = Some(1);
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
let f_f_f_x = Term::new_function(1, vec![f_f_x]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_f_x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_f_x));
}
|
identifier_body
|
kbo.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use std::collections::hash_map::HashMap;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
/// Checks if s is greater than t according to the ordering.
pub fn kbo_gt(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if s.is_function() && t.is_function() {
let s_weight = weight.weight(only_unary_func, s);
let t_weight = weight.weight(only_unary_func, t);
if s_weight > t_weight {
variable_domination(s, t)
} else if s_weight == t_weight {
if kbo_precedence(precedence, only_unary_func, s, t) ||
(s.get_id() == t.get_id() &&
lexical_ordering(precedence, weight, only_unary_func, s, t)) {
variable_domination(s, t)
} else
|
} else {
false
}
} else if s.is_function() && t.is_variable() {
s.occurs_proper(t)
} else {
false
}
}
/// Checks if s is greater than or equal to t according to the ordering.
pub fn kbo_ge(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
s == t || kbo_gt(precedence, weight, only_unary_func, s, t)
}
/// Checks if, for every variable x, the number of occurrences of x in s is greater than or equal to the number in t.
fn variable_domination(s: &Term, t: &Term) -> bool {
let mut variable_counts = HashMap::new();
variable_count(&mut variable_counts, s, 1);
variable_count(&mut variable_counts, t, -1);
variable_counts.values().all(|&count| count >= 0)
}
fn variable_count(counts: &mut HashMap<i64, i64>, t: &Term, weight: i64) {
if t.is_variable() {
let v = counts.entry(t.get_id()).or_insert(0);
*v += weight;
} else {
for x in t.iter() {
variable_count(counts, x, weight);
}
}
}
fn lexical_ordering(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
assert_eq!(s.get_id(), t.get_id());
assert_eq!(s.get_arity(), t.get_arity());
for i in 0..s.get_arity() {
if kbo_gt(precedence, weight, only_unary_func, &s[i], &t[i]) {
return true;
} else if s[i] != t[i] {
return false;
}
}
false
}
/// Expands the precedence so that it is suitable for KBO.
/// If there is exactly one unary function in the problem, it is greater than all other functions.
fn kbo_precedence(precedence: &Precedence,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if let Some(id) = *only_unary_func {
if s.get_id() == id && t.get_id() != id {
return true;
} else if s.get_id() != id && t.get_id() == id {
return false;
} else if s.get_id() == id && t.get_id() == id {
return false;
}
}
precedence.gt(s, t)
}
#[cfg(test)]
mod test {
use super::kbo_gt;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
#[test]
fn kbo_gt_1() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let y = Term::new_variable(-2);
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &y));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &y, &x));
}
#[test]
fn kbo_gt_2() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &f_x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_x));
}
#[test]
fn kbo_gt_3() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = Some(1);
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
let f_f_f_x = Term::new_function(1, vec![f_f_x]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_f_x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_f_x));
}
}
|
{
false
}
|
conditional_block
|
kbo.rs
|
// Serkr - An automated theorem prover. Copyright (C) 2015-2016 Mikko Aarnos.
//
// Serkr is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Serkr is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Serkr. If not, see <http://www.gnu.org/licenses/>.
//
use std::collections::hash_map::HashMap;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
/// Checks if s is greater than t according to the ordering.
pub fn kbo_gt(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if s.is_function() && t.is_function() {
let s_weight = weight.weight(only_unary_func, s);
let t_weight = weight.weight(only_unary_func, t);
if s_weight > t_weight {
variable_domination(s, t)
} else if s_weight == t_weight {
if kbo_precedence(precedence, only_unary_func, s, t) ||
(s.get_id() == t.get_id() &&
lexical_ordering(precedence, weight, only_unary_func, s, t)) {
variable_domination(s, t)
} else {
false
}
} else {
false
}
} else if s.is_function() && t.is_variable() {
s.occurs_proper(t)
} else {
false
}
}
/// Checks if s is greater than or equal to t according to the ordering.
pub fn kbo_ge(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
s == t || kbo_gt(precedence, weight, only_unary_func, s, t)
}
/// Checks if for every variable x the amount of x in s is greater than or equal to the amount in t.
fn variable_domination(s: &Term, t: &Term) -> bool {
let mut variable_counts = HashMap::new();
variable_count(&mut variable_counts, s, 1);
variable_count(&mut variable_counts, t, -1);
variable_counts.values().all(|&count| count >= 0)
}
fn variable_count(counts: &mut HashMap<i64, i64>, t: &Term, weight: i64) {
if t.is_variable() {
let v = counts.entry(t.get_id()).or_insert(0);
*v += weight;
} else {
for x in t.iter() {
variable_count(counts, x, weight);
}
}
}
fn
|
(precedence: &Precedence,
weight: &Weight,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
assert_eq!(s.get_id(), t.get_id());
assert_eq!(s.get_arity(), t.get_arity());
for i in 0..s.get_arity() {
if kbo_gt(precedence, weight, only_unary_func, &s[i], &t[i]) {
return true;
        } else if s[i] != t[i] {
return false;
}
}
false
}
/// Expands the precedence so that it is suitable for KBO.
/// If there is exactly one unary function in the problem, it is greater than all other functions.
fn kbo_precedence(precedence: &Precedence,
only_unary_func: &Option<i64>,
s: &Term,
t: &Term)
-> bool {
if let Some(id) = *only_unary_func {
        if s.get_id() == id && t.get_id() != id {
            return true;
        } else if s.get_id() != id && t.get_id() == id {
return false;
} else if s.get_id() == id && t.get_id() == id {
return false;
}
}
precedence.gt(s, t)
}
#[cfg(test)]
mod test {
use super::kbo_gt;
use prover::data_structures::term::Term;
use prover::ordering::precedence::Precedence;
use prover::ordering::weight::Weight;
#[test]
fn kbo_gt_1() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let y = Term::new_variable(-2);
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &y));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &y, &x));
}
#[test]
fn kbo_gt_2() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = None;
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &f_x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &x));
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_x, &x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_x));
}
#[test]
fn kbo_gt_3() {
let precedence = Precedence::default();
let weight = Weight::SimpleWeight;
let only_unary_func = Some(1);
let x = Term::new_variable(-1);
let f_x = Term::new_function(1, vec![x.clone()]);
let f_f_x = Term::new_function(1, vec![f_x.clone()]);
let f_f_f_x = Term::new_function(1, vec![f_f_x]);
assert!(kbo_gt(&precedence, &weight, &only_unary_func, &f_f_f_x, &f_x));
assert!(!kbo_gt(&precedence, &weight, &only_unary_func, &f_x, &f_f_f_x));
}
}
|
lexical_ordering
|
identifier_name
|
lib.rs
|
/// Generate a secret key for signing and verifying JSON Web Tokens and HMACs.
///
/// Returns a secret comprised of 64 URL and command line compatible characters
/// (e.g. so that it can easily be entered on the CLI e.g. for a `--key` option ).
///
/// Uses 64 bytes because this is the maximum size possible for JWT signing keys.
/// Using a large key for JWT signing reduces the probability of brute force attacks.
|
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
(0..64)
.map(|_| {
let idx = rng.gen_range(0..CHARSET.len());
CHARSET[idx] as char
})
.collect()
}
|
/// See <https://auth0.com/blog/brute-forcing-hs256-is-possible-the-importance-of-using-strong-keys-to-sign-jwts/>.
pub fn generate() -> String {
|
random_line_split
|
lib.rs
|
/// Generate a secret key for signing and verifying JSON Web Tokens and HMACs.
///
/// Returns a secret comprised of 64 URL and command line compatible characters
/// (e.g. so that it can easily be entered on the CLI e.g. for a `--key` option ).
///
/// Uses 64 bytes because this is the maximum size possible for JWT signing keys.
/// Using a large key for JWT signing reduces the probability of brute force attacks.
/// See <https://auth0.com/blog/brute-forcing-hs256-is-possible-the-importance-of-using-strong-keys-to-sign-jwts/>.
pub fn
|
() -> String {
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
(0..64)
.map(|_| {
let idx = rng.gen_range(0..CHARSET.len());
CHARSET[idx] as char
})
.collect()
}
|
generate
|
identifier_name
|
lib.rs
|
/// Generate a secret key for signing and verifying JSON Web Tokens and HMACs.
///
/// Returns a secret comprised of 64 URL and command line compatible characters
/// (e.g. so that it can easily be entered on the CLI e.g. for a `--key` option ).
///
/// Uses 64 bytes because this is the maximum size possible for JWT signing keys.
/// Using a large key for JWT signing reduces the probability of brute force attacks.
/// See <https://auth0.com/blog/brute-forcing-hs256-is-possible-the-importance-of-using-strong-keys-to-sign-jwts/>.
pub fn generate() -> String
|
{
use rand::Rng;
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
let mut rng = rand::thread_rng();
(0..64)
.map(|_| {
let idx = rng.gen_range(0..CHARSET.len());
CHARSET[idx] as char
})
.collect()
}
|
identifier_body
|
|
hamming.rs
|
pub fn hamming_dist(a: &[u8], b:&[u8]) -> Option<u32> {
use std::iter::{IteratorExt, AdditiveIterator};
if a.len()!=b.len() {
return None;
}
let result = a.iter().zip(b.iter()).map(|(x,y)| {n_ones(x^y) as u32}).sum();
Some(result)
}
pub fn avg_hamming_dist(entries: &[&[u8]]) -> Option<f32> {
let n = entries.len();
if n==0 {return None};
let mut sum = 0f32;
for idx_a in 0..n {
for idx_b in (idx_a+1)..n {
let a = entries[idx_a];
let b = entries[idx_b];
match hamming_dist(a, b) {
Some(d) => sum += d as f32,
None => return None,
}
}
}
let nf32 = n as f32;
Some(sum/(nf32*(nf32+1f32)/2f32))
}
fn n_ones(n : u8) -> u8 {
let mut ones=0;
let mut val=n;
    while val != 0 {
ones += 1;
val &= val-1;
}
ones
}
#[test]
fn n_ones_test() {
assert_eq!(n_ones(5), 2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "this is not a test".as_bytes();
assert_eq!(Some(37), hamming_dist(a,b));
assert_eq!(None, hamming_dist(a,c));
}
fn avg_hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "onyonghasayo!!".as_bytes();
let d = "this is not a test".as_bytes();
assert_eq!(avg_hamming_dist(&[a,b,c]),
Some((hamming_dist(a,b).unwrap()
|
+ hamming_dist(b,c).unwrap()
+ hamming_dist(a,c).unwrap()) as f32/3f32));
assert_eq!(avg_hamming_dist(&[a,b,c,d]), None);
}
}
|
random_line_split
|
|
hamming.rs
|
pub fn hamming_dist(a: &[u8], b:&[u8]) -> Option<u32> {
use std::iter::{IteratorExt, AdditiveIterator};
if a.len()!=b.len()
|
let result = a.iter().zip(b.iter()).map(|(x,y)| {n_ones(x^y) as u32}).sum();
Some(result)
}
pub fn avg_hamming_dist(entries: &[&[u8]]) -> Option<f32> {
let n = entries.len();
if n==0 {return None};
let mut sum = 0f32;
for idx_a in 0..n {
for idx_b in (idx_a+1)..n {
let a = entries[idx_a];
let b = entries[idx_b];
match hamming_dist(a, b) {
Some(d) => sum += d as f32,
None => return None,
}
}
}
let nf32 = n as f32;
Some(sum/(nf32*(nf32+1f32)/2f32))
}
fn n_ones(n : u8) -> u8 {
let mut ones=0;
let mut val=n;
    while val != 0 {
ones += 1;
val &= val-1;
}
ones
}
#[test]
fn n_ones_test() {
assert_eq!(n_ones(5), 2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "this is not a test".as_bytes();
assert_eq!(Some(37), hamming_dist(a,b));
assert_eq!(None, hamming_dist(a,c));
}
fn avg_hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "onyonghasayo!!".as_bytes();
let d = "this is not a test".as_bytes();
assert_eq!(avg_hamming_dist(&[a,b,c]),
Some((hamming_dist(a,b).unwrap()
+ hamming_dist(b,c).unwrap()
+ hamming_dist(a,c).unwrap()) as f32/3f32));
assert_eq!(avg_hamming_dist(&[a,b,c,d]), None);
}
}
|
{
return None;
}
|
conditional_block
|
hamming.rs
|
pub fn hamming_dist(a: &[u8], b:&[u8]) -> Option<u32> {
use std::iter::{IteratorExt, AdditiveIterator};
if a.len()!=b.len() {
return None;
}
let result = a.iter().zip(b.iter()).map(|(x,y)| {n_ones(x^y) as u32}).sum();
Some(result)
}
pub fn avg_hamming_dist(entries: &[&[u8]]) -> Option<f32> {
let n = entries.len();
if n==0 {return None};
let mut sum = 0f32;
for idx_a in 0..n {
for idx_b in (idx_a+1)..n {
let a = entries[idx_a];
let b = entries[idx_b];
match hamming_dist(a, b) {
Some(d) => sum += d as f32,
None => return None,
}
}
}
let nf32 = n as f32;
Some(sum/(nf32*(nf32+1f32)/2f32))
}
fn n_ones(n : u8) -> u8 {
let mut ones=0;
let mut val=n;
    while val != 0 {
ones += 1;
val &= val-1;
}
ones
}
#[test]
fn n_ones_test() {
assert_eq!(n_ones(5), 2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hamming_dist_test()
|
fn avg_hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "onyonghasayo!!".as_bytes();
let d = "this is not a test".as_bytes();
assert_eq!(avg_hamming_dist(&[a,b,c]),
Some((hamming_dist(a,b).unwrap()
+ hamming_dist(b,c).unwrap()
+ hamming_dist(a,c).unwrap()) as f32/3f32));
assert_eq!(avg_hamming_dist(&[a,b,c,d]), None);
}
}
|
{
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "this is not a test".as_bytes();
assert_eq!(Some(37), hamming_dist(a,b));
assert_eq!(None, hamming_dist(a,c));
}
|
identifier_body
|
hamming.rs
|
pub fn hamming_dist(a: &[u8], b:&[u8]) -> Option<u32> {
use std::iter::{IteratorExt, AdditiveIterator};
if a.len()!=b.len() {
return None;
}
let result = a.iter().zip(b.iter()).map(|(x,y)| {n_ones(x^y) as u32}).sum();
Some(result)
}
pub fn avg_hamming_dist(entries: &[&[u8]]) -> Option<f32> {
let n = entries.len();
if n==0 {return None};
let mut sum = 0f32;
for idx_a in 0..n {
for idx_b in (idx_a+1)..n {
let a = entries[idx_a];
let b = entries[idx_b];
match hamming_dist(a, b) {
Some(d) => sum += d as f32,
None => return None,
}
}
}
let nf32 = n as f32;
Some(sum/(nf32*(nf32+1f32)/2f32))
}
fn n_ones(n : u8) -> u8 {
let mut ones=0;
let mut val=n;
    while val != 0 {
ones += 1;
val &= val-1;
}
ones
}
#[test]
fn
|
() {
assert_eq!(n_ones(5), 2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "this is not a test".as_bytes();
assert_eq!(Some(37), hamming_dist(a,b));
assert_eq!(None, hamming_dist(a,c));
}
fn avg_hamming_dist_test() {
let a = "this is a test".as_bytes();
let b = "wokka wokka!!!".as_bytes();
let c = "onyonghasayo!!".as_bytes();
let d = "this is not a test".as_bytes();
assert_eq!(avg_hamming_dist(&[a,b,c]),
Some((hamming_dist(a,b).unwrap()
+ hamming_dist(b,c).unwrap()
+ hamming_dist(a,c).unwrap()) as f32/3f32));
assert_eq!(avg_hamming_dist(&[a,b,c,d]), None);
}
}
|
n_ones_test
|
identifier_name
|
mod.rs
|
#![cfg(target_os = "emscripten")]
use std::ffi::CString;
use libc;
use {Event, BuilderAttribs, CreationError, MouseCursor};
use Api;
use PixelFormat;
use GlContext;
use std::collections::VecDeque;
mod ffi;
pub struct Window {
context: ffi::EMSCRIPTEN_WEBGL_CONTEXT_HANDLE,
}
pub struct PollEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for PollEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
pub struct WaitEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for WaitEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
#[derive(Clone)]
pub struct WindowProxy;
impl WindowProxy {
pub fn wakeup_event_loop(&self) {
unimplemented!()
}
}
pub struct MonitorID;
pub fn get_available_monitors() -> VecDeque<MonitorID> {
let mut list = VecDeque::new();
list.push_back(MonitorID);
list
}
pub fn get_primary_monitor() -> MonitorID {
MonitorID
}
impl MonitorID {
pub fn get_name(&self) -> Option<String> {
Some("Canvas".to_string())
}
pub fn get_dimensions(&self) -> (u32, u32) {
unimplemented!()
}
}
impl Window {
pub fn new(builder: BuilderAttribs) -> Result<Window, CreationError> {
// getting the default values of attributes
let mut attributes = unsafe {
use std::mem;
let mut attributes: ffi::EmscriptenWebGLContextAttributes = mem::uninitialized();
ffi::emscripten_webgl_init_context_attributes(&mut attributes);
attributes
};
// setting the attributes
// FIXME:
/*match builder.gl_version {
Some((major, minor)) => {
attributes.majorVersion = major as libc::c_int;
attributes.minorVersion = minor as libc::c_int;
},
None => ()
};*/
// creating the context
let context = unsafe {
use std::{mem, ptr};
let context = ffi::emscripten_webgl_create_context(ptr::null(), &attributes);
if context <= 0 {
return Err(CreationError::OsError(format!("Error while calling emscripten_webgl_create_context: {}",
error_to_str(mem::transmute(context)))));
}
context
};
// TODO: emscripten_set_webglcontextrestored_callback
Ok(Window {
context: context
})
}
pub fn is_closed(&self) -> bool {
use std::ptr;
        unsafe { ffi::emscripten_is_webgl_context_lost(ptr::null()) != 0 }
}
pub fn set_title(&self, _title: &str) {
}
pub fn get_position(&self) -> Option<(i32, i32)> {
Some((0, 0))
}
pub fn set_position(&self, _: i32, _: i32) {
}
pub fn get_inner_size(&self) -> Option<(u32, u32)> {
unsafe {
use std::{mem, ptr};
let mut width = mem::uninitialized();
let mut height = mem::uninitialized();
if ffi::emscripten_get_element_css_size(ptr::null(), &mut width, &mut height)
!= ffi::EMSCRIPTEN_RESULT_SUCCESS
{
None
} else {
Some((width as u32, height as u32))
}
}
}
pub fn get_outer_size(&self) -> Option<(u32, u32)> {
self.get_inner_size()
}
pub fn set_inner_size(&self, width: u32, height: u32) {
unsafe {
use std::ptr;
ffi::emscripten_set_element_css_size(ptr::null(), width as libc::c_double, height
as libc::c_double);
}
}
pub fn poll_events(&self) -> PollEventsIterator {
PollEventsIterator {
window: self,
}
}
pub fn wait_events(&self) -> WaitEventsIterator {
WaitEventsIterator {
window: self,
}
}
pub fn create_window_proxy(&self) -> WindowProxy {
WindowProxy
}
pub fn show(&self) {}
pub fn hide(&self) {}
pub fn platform_display(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn platform_window(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn set_window_resize_callback(&mut self, _: Option<fn(u32, u32)>) {
}
pub fn set_cursor(&self, _cursor: MouseCursor) {
unimplemented!()
}
pub fn hidpi_factor(&self) -> f32 {
1.0
}
}
impl GlContext for Window {
unsafe fn make_current(&self) {
        // TODO: check if == EMSCRIPTEN_RESULT
ffi::emscripten_webgl_make_context_current(self.context);
}
fn is_current(&self) -> bool {
true // FIXME:
}
fn get_proc_address(&self, addr: &str) -> *const libc::c_void {
let addr = CString::new(addr.as_bytes()).unwrap();
let addr = addr.as_ptr();
unsafe {
ffi::emscripten_GetProcAddress(addr) as *const _
}
}
fn swap_buffers(&self) {
unsafe {
ffi::emscripten_sleep(1); // FIXME:
}
}
fn get_api(&self) -> Api {
Api::WebGl
}
fn get_pixel_format(&self) -> PixelFormat {
unimplemented!();
}
}
impl Drop for Window {
fn drop(&mut self) {
unsafe {
ffi::emscripten_exit_fullscreen();
ffi::emscripten_webgl_destroy_context(self.context);
}
}
}
fn
|
(code: ffi::EMSCRIPTEN_RESULT) -> &'static str {
match code {
ffi::EMSCRIPTEN_RESULT_SUCCESS | ffi::EMSCRIPTEN_RESULT_DEFERRED
=> "Internal error in the library (success detected as failure)",
ffi::EMSCRIPTEN_RESULT_NOT_SUPPORTED => "Not supported",
ffi::EMSCRIPTEN_RESULT_FAILED_NOT_DEFERRED => "Failed not deferred",
ffi::EMSCRIPTEN_RESULT_INVALID_TARGET => "Invalid target",
ffi::EMSCRIPTEN_RESULT_UNKNOWN_TARGET => "Unknown target",
ffi::EMSCRIPTEN_RESULT_INVALID_PARAM => "Invalid parameter",
ffi::EMSCRIPTEN_RESULT_FAILED => "Failed",
ffi::EMSCRIPTEN_RESULT_NO_DATA => "No data",
_ => "Undocumented error"
}
}
|
error_to_str
|
identifier_name
|
mod.rs
|
#![cfg(target_os = "emscripten")]
use std::ffi::CString;
use libc;
use {Event, BuilderAttribs, CreationError, MouseCursor};
use Api;
use PixelFormat;
use GlContext;
use std::collections::VecDeque;
mod ffi;
pub struct Window {
context: ffi::EMSCRIPTEN_WEBGL_CONTEXT_HANDLE,
}
pub struct PollEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for PollEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
pub struct WaitEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for WaitEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
#[derive(Clone)]
pub struct WindowProxy;
impl WindowProxy {
pub fn wakeup_event_loop(&self) {
unimplemented!()
}
}
pub struct MonitorID;
pub fn get_available_monitors() -> VecDeque<MonitorID> {
let mut list = VecDeque::new();
list.push_back(MonitorID);
list
}
pub fn get_primary_monitor() -> MonitorID {
MonitorID
}
impl MonitorID {
pub fn get_name(&self) -> Option<String> {
Some("Canvas".to_string())
}
pub fn get_dimensions(&self) -> (u32, u32) {
unimplemented!()
}
}
impl Window {
pub fn new(builder: BuilderAttribs) -> Result<Window, CreationError> {
// getting the default values of attributes
let mut attributes = unsafe {
use std::mem;
let mut attributes: ffi::EmscriptenWebGLContextAttributes = mem::uninitialized();
ffi::emscripten_webgl_init_context_attributes(&mut attributes);
attributes
};
// setting the attributes
// FIXME:
/*match builder.gl_version {
Some((major, minor)) => {
attributes.majorVersion = major as libc::c_int;
attributes.minorVersion = minor as libc::c_int;
},
None => ()
};*/
// creating the context
let context = unsafe {
use std::{mem, ptr};
let context = ffi::emscripten_webgl_create_context(ptr::null(), &attributes);
if context <= 0 {
return Err(CreationError::OsError(format!("Error while calling emscripten_webgl_create_context: {}",
error_to_str(mem::transmute(context)))));
}
context
};
|
// TODO: emscripten_set_webglcontextrestored_callback
Ok(Window {
context: context
})
}
pub fn is_closed(&self) -> bool {
use std::ptr;
        unsafe { ffi::emscripten_is_webgl_context_lost(ptr::null()) != 0 }
}
pub fn set_title(&self, _title: &str) {
}
pub fn get_position(&self) -> Option<(i32, i32)> {
Some((0, 0))
}
pub fn set_position(&self, _: i32, _: i32) {
}
pub fn get_inner_size(&self) -> Option<(u32, u32)> {
unsafe {
use std::{mem, ptr};
let mut width = mem::uninitialized();
let mut height = mem::uninitialized();
if ffi::emscripten_get_element_css_size(ptr::null(), &mut width, &mut height)
!= ffi::EMSCRIPTEN_RESULT_SUCCESS
{
None
} else {
Some((width as u32, height as u32))
}
}
}
pub fn get_outer_size(&self) -> Option<(u32, u32)> {
self.get_inner_size()
}
pub fn set_inner_size(&self, width: u32, height: u32) {
unsafe {
use std::ptr;
ffi::emscripten_set_element_css_size(ptr::null(), width as libc::c_double, height
as libc::c_double);
}
}
pub fn poll_events(&self) -> PollEventsIterator {
PollEventsIterator {
window: self,
}
}
pub fn wait_events(&self) -> WaitEventsIterator {
WaitEventsIterator {
window: self,
}
}
pub fn create_window_proxy(&self) -> WindowProxy {
WindowProxy
}
pub fn show(&self) {}
pub fn hide(&self) {}
pub fn platform_display(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn platform_window(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn set_window_resize_callback(&mut self, _: Option<fn(u32, u32)>) {
}
pub fn set_cursor(&self, _cursor: MouseCursor) {
unimplemented!()
}
pub fn hidpi_factor(&self) -> f32 {
1.0
}
}
impl GlContext for Window {
unsafe fn make_current(&self) {
        // TODO: check if == EMSCRIPTEN_RESULT
ffi::emscripten_webgl_make_context_current(self.context);
}
fn is_current(&self) -> bool {
true // FIXME:
}
fn get_proc_address(&self, addr: &str) -> *const libc::c_void {
let addr = CString::new(addr.as_bytes()).unwrap();
let addr = addr.as_ptr();
unsafe {
ffi::emscripten_GetProcAddress(addr) as *const _
}
}
fn swap_buffers(&self) {
unsafe {
ffi::emscripten_sleep(1); // FIXME:
}
}
fn get_api(&self) -> Api {
Api::WebGl
}
fn get_pixel_format(&self) -> PixelFormat {
unimplemented!();
}
}
impl Drop for Window {
fn drop(&mut self) {
unsafe {
ffi::emscripten_exit_fullscreen();
ffi::emscripten_webgl_destroy_context(self.context);
}
}
}
fn error_to_str(code: ffi::EMSCRIPTEN_RESULT) -> &'static str {
match code {
ffi::EMSCRIPTEN_RESULT_SUCCESS | ffi::EMSCRIPTEN_RESULT_DEFERRED
=> "Internal error in the library (success detected as failure)",
ffi::EMSCRIPTEN_RESULT_NOT_SUPPORTED => "Not supported",
ffi::EMSCRIPTEN_RESULT_FAILED_NOT_DEFERRED => "Failed not deferred",
ffi::EMSCRIPTEN_RESULT_INVALID_TARGET => "Invalid target",
ffi::EMSCRIPTEN_RESULT_UNKNOWN_TARGET => "Unknown target",
ffi::EMSCRIPTEN_RESULT_INVALID_PARAM => "Invalid parameter",
ffi::EMSCRIPTEN_RESULT_FAILED => "Failed",
ffi::EMSCRIPTEN_RESULT_NO_DATA => "No data",
_ => "Undocumented error"
}
}
|
random_line_split
|
|
mod.rs
|
#![cfg(target_os = "emscripten")]
use std::ffi::CString;
use libc;
use {Event, BuilderAttribs, CreationError, MouseCursor};
use Api;
use PixelFormat;
use GlContext;
use std::collections::VecDeque;
mod ffi;
pub struct Window {
context: ffi::EMSCRIPTEN_WEBGL_CONTEXT_HANDLE,
}
pub struct PollEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for PollEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
pub struct WaitEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for WaitEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
None
}
}
#[derive(Clone)]
pub struct WindowProxy;
impl WindowProxy {
pub fn wakeup_event_loop(&self) {
unimplemented!()
}
}
pub struct MonitorID;
pub fn get_available_monitors() -> VecDeque<MonitorID> {
let mut list = VecDeque::new();
list.push_back(MonitorID);
list
}
pub fn get_primary_monitor() -> MonitorID {
MonitorID
}
impl MonitorID {
pub fn get_name(&self) -> Option<String> {
Some("Canvas".to_string())
}
pub fn get_dimensions(&self) -> (u32, u32) {
unimplemented!()
}
}
impl Window {
pub fn new(builder: BuilderAttribs) -> Result<Window, CreationError> {
// getting the default values of attributes
let mut attributes = unsafe {
use std::mem;
let mut attributes: ffi::EmscriptenWebGLContextAttributes = mem::uninitialized();
ffi::emscripten_webgl_init_context_attributes(&mut attributes);
attributes
};
// setting the attributes
// FIXME:
/*match builder.gl_version {
Some((major, minor)) => {
attributes.majorVersion = major as libc::c_int;
attributes.minorVersion = minor as libc::c_int;
},
None => ()
};*/
// creating the context
let context = unsafe {
use std::{mem, ptr};
let context = ffi::emscripten_webgl_create_context(ptr::null(), &attributes);
if context <= 0 {
return Err(CreationError::OsError(format!("Error while calling emscripten_webgl_create_context: {}",
error_to_str(mem::transmute(context)))));
}
context
};
// TODO: emscripten_set_webglcontextrestored_callback
Ok(Window {
context: context
})
}
pub fn is_closed(&self) -> bool {
use std::ptr;
        unsafe { ffi::emscripten_is_webgl_context_lost(ptr::null()) != 0 }
}
pub fn set_title(&self, _title: &str)
|
pub fn get_position(&self) -> Option<(i32, i32)> {
Some((0, 0))
}
pub fn set_position(&self, _: i32, _: i32) {
}
pub fn get_inner_size(&self) -> Option<(u32, u32)> {
unsafe {
use std::{mem, ptr};
let mut width = mem::uninitialized();
let mut height = mem::uninitialized();
if ffi::emscripten_get_element_css_size(ptr::null(), &mut width, &mut height)
!= ffi::EMSCRIPTEN_RESULT_SUCCESS
{
None
} else {
Some((width as u32, height as u32))
}
}
}
pub fn get_outer_size(&self) -> Option<(u32, u32)> {
self.get_inner_size()
}
pub fn set_inner_size(&self, width: u32, height: u32) {
unsafe {
use std::ptr;
ffi::emscripten_set_element_css_size(ptr::null(), width as libc::c_double, height
as libc::c_double);
}
}
pub fn poll_events(&self) -> PollEventsIterator {
PollEventsIterator {
window: self,
}
}
pub fn wait_events(&self) -> WaitEventsIterator {
WaitEventsIterator {
window: self,
}
}
pub fn create_window_proxy(&self) -> WindowProxy {
WindowProxy
}
pub fn show(&self) {}
pub fn hide(&self) {}
pub fn platform_display(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn platform_window(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn set_window_resize_callback(&mut self, _: Option<fn(u32, u32)>) {
}
pub fn set_cursor(&self, _cursor: MouseCursor) {
unimplemented!()
}
pub fn hidpi_factor(&self) -> f32 {
1.0
}
}
impl GlContext for Window {
unsafe fn make_current(&self) {
        // TODO: check if == EMSCRIPTEN_RESULT
ffi::emscripten_webgl_make_context_current(self.context);
}
fn is_current(&self) -> bool {
true // FIXME:
}
fn get_proc_address(&self, addr: &str) -> *const libc::c_void {
let addr = CString::new(addr.as_bytes()).unwrap();
let addr = addr.as_ptr();
unsafe {
ffi::emscripten_GetProcAddress(addr) as *const _
}
}
fn swap_buffers(&self) {
unsafe {
ffi::emscripten_sleep(1); // FIXME:
}
}
fn get_api(&self) -> Api {
Api::WebGl
}
fn get_pixel_format(&self) -> PixelFormat {
unimplemented!();
}
}
impl Drop for Window {
fn drop(&mut self) {
unsafe {
ffi::emscripten_exit_fullscreen();
ffi::emscripten_webgl_destroy_context(self.context);
}
}
}
fn error_to_str(code: ffi::EMSCRIPTEN_RESULT) -> &'static str {
match code {
ffi::EMSCRIPTEN_RESULT_SUCCESS | ffi::EMSCRIPTEN_RESULT_DEFERRED
=> "Internal error in the library (success detected as failure)",
ffi::EMSCRIPTEN_RESULT_NOT_SUPPORTED => "Not supported",
ffi::EMSCRIPTEN_RESULT_FAILED_NOT_DEFERRED => "Failed not deferred",
ffi::EMSCRIPTEN_RESULT_INVALID_TARGET => "Invalid target",
ffi::EMSCRIPTEN_RESULT_UNKNOWN_TARGET => "Unknown target",
ffi::EMSCRIPTEN_RESULT_INVALID_PARAM => "Invalid parameter",
ffi::EMSCRIPTEN_RESULT_FAILED => "Failed",
ffi::EMSCRIPTEN_RESULT_NO_DATA => "No data",
_ => "Undocumented error"
}
}
|
{
}
|
identifier_body
|
fs.rs
|
46, 46, 0,..] => return None,
_ => {}
}
Some(DirEntry {
root: root.clone(),
data: *wfd,
})
}
pub fn path(&self) -> PathBuf {
self.root.join(&self.file_name())
}
pub fn file_name(&self) -> OsString {
let filename = super::truncate_utf16_at_nul(&self.data.cFileName);
OsString::from_wide(filename)
}
pub fn file_type(&self) -> io::Result<FileType> {
Ok(FileType::new(self.data.dwFileAttributes,
/* reparse_tag = */ self.data.dwReserved0))
}
pub fn metadata(&self) -> io::Result<FileAttr> {
Ok(FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: self.data.dwFileAttributes,
ftCreationTime: self.data.ftCreationTime,
ftLastAccessTime: self.data.ftLastAccessTime,
ftLastWriteTime: self.data.ftLastWriteTime,
nFileSizeHigh: self.data.nFileSizeHigh,
nFileSizeLow: self.data.nFileSizeLow,
},
reparse_tag: self.data.dwReserved0,
})
}
}
impl OpenOptions {
pub fn new() -> OpenOptions { Default::default() }
pub fn read(&mut self, read: bool) { self.read = read; }
pub fn write(&mut self, write: bool) { self.write = write; }
pub fn append(&mut self, append: bool) { self.append = append; }
pub fn create(&mut self, create: bool) { self.create = create; }
pub fn truncate(&mut self, truncate: bool) { self.truncate = truncate; }
pub fn creation_disposition(&mut self, val: u32) {
self.creation_disposition = Some(val);
}
pub fn flags_and_attributes(&mut self, val: u32) {
self.flags_and_attributes = Some(val);
}
pub fn desired_access(&mut self, val: u32) {
self.desired_access = Some(val);
}
pub fn share_mode(&mut self, val: u32) {
self.share_mode = Some(val);
}
pub fn security_attributes(&mut self, attrs: libc::LPSECURITY_ATTRIBUTES) {
self.security_attributes = attrs as usize;
}
fn get_desired_access(&self) -> libc::DWORD {
self.desired_access.unwrap_or({
let mut base = if self.read {libc::FILE_GENERIC_READ} else {0} |
if self.write {libc::FILE_GENERIC_WRITE} else {0};
if self.append {
                base &= !libc::FILE_WRITE_DATA;
base |= libc::FILE_APPEND_DATA;
}
base
})
}
fn get_share_mode(&self) -> libc::DWORD {
// libuv has a good comment about this, but the basic idea is that
// we try to emulate unix semantics by enabling all sharing by
// allowing things such as deleting a file while it's still open.
self.share_mode.unwrap_or(libc::FILE_SHARE_READ |
libc::FILE_SHARE_WRITE |
libc::FILE_SHARE_DELETE)
}
fn get_creation_disposition(&self) -> libc::DWORD {
self.creation_disposition.unwrap_or({
match (self.create, self.truncate) {
(true, true) => libc::CREATE_ALWAYS,
(true, false) => libc::OPEN_ALWAYS,
(false, false) => libc::OPEN_EXISTING,
(false, true) => {
                    if self.write && !self.append {
libc::CREATE_ALWAYS
} else {
libc::TRUNCATE_EXISTING
}
}
}
})
}
fn get_flags_and_attributes(&self) -> libc::DWORD {
self.flags_and_attributes.unwrap_or(libc::FILE_ATTRIBUTE_NORMAL)
}
}
impl File {
fn open_reparse_point(path: &Path, write: bool) -> io::Result<File> {
let mut opts = OpenOptions::new();
opts.read(!write);
opts.write(write);
opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT |
c::FILE_FLAG_BACKUP_SEMANTICS);
File::open(path, &opts)
}
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = to_utf16(path);
let handle = unsafe {
libc::CreateFileW(path.as_ptr(),
opts.get_desired_access(),
opts.get_share_mode(),
opts.security_attributes as *mut _,
opts.get_creation_disposition(),
opts.get_flags_and_attributes(),
ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
Err(Error::last_os_error())
} else {
Ok(File { handle: Handle::new(handle) })
}
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt(unsafe { libc::FlushFileBuffers(self.handle.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> { self.fsync() }
pub fn truncate(&self, size: u64) -> io::Result<()> {
let mut info = c::FILE_END_OF_FILE_INFO {
EndOfFile: size as libc::LARGE_INTEGER,
};
let size = mem::size_of_val(&info);
try!(cvt(unsafe {
c::SetFileInformationByHandle(self.handle.raw(),
c::FileEndOfFileInfo,
&mut info as *mut _ as *mut _,
size as libc::DWORD)
}));
Ok(())
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
unsafe {
let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
&mut info)));
let mut attr = FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: info.dwFileAttributes,
ftCreationTime: info.ftCreationTime,
ftLastAccessTime: info.ftLastAccessTime,
ftLastWriteTime: info.ftLastWriteTime,
nFileSizeHigh: info.nFileSizeHigh,
nFileSizeLow: info.nFileSizeLow,
},
reparse_tag: 0,
};
if attr.is_reparse_point() {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
if let Ok((_, buf)) = self.reparse_point(&mut b) {
attr.reparse_tag = buf.ReparseTag;
}
}
Ok(attr)
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.handle.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.handle.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(n) => (libc::FILE_BEGIN, n as i64),
SeekFrom::End(n) => (libc::FILE_END, n),
SeekFrom::Current(n) => (libc::FILE_CURRENT, n),
};
let pos = pos as libc::LARGE_INTEGER;
let mut newpos = 0;
try!(cvt(unsafe {
libc::SetFilePointerEx(self.handle.raw(), pos,
&mut newpos, whence)
}));
Ok(newpos as u64)
}
pub fn handle(&self) -> &Handle { &self.handle }
pub fn into_handle(self) -> Handle { self.handle }
fn reparse_point<'a>(&self,
space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE])
-> io::Result<(libc::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
try!(cvt({
c::DeviceIoControl(self.handle.raw(),
c::FSCTL_GET_REPARSE_POINT,
0 as *mut _,
0,
space.as_mut_ptr() as *mut _,
space.len() as libc::DWORD,
&mut bytes,
0 as *mut _)
}));
Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
let (_bytes, buf) = try!(self.reparse_point(&mut space));
        if buf.ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
}
unsafe {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
&buf.rest as *const _ as *const _;
let path_buffer = &(*info).PathBuffer as *const _ as *const u16;
let subst_off = (*info).SubstituteNameOffset / 2;
let subst_ptr = path_buffer.offset(subst_off as isize);
let subst_len = (*info).SubstituteNameLength / 2;
let subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
Ok(PathBuf::from(OsString::from_wide(subst)))
}
}
}
impl FromInner<libc::HANDLE> for File {
fn from_inner(handle: libc::HANDLE) -> File {
File { handle: Handle::new(handle) }
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// FIXME(#24570): add more info here (e.g. mode)
let mut b = f.debug_struct("File");
b.field("handle", &self.handle.raw());
if let Ok(path) = get_path(&self) {
b.field("path", &path);
}
b.finish()
}
}
pub fn to_utf16(s: &Path) -> Vec<u16> {
s.as_os_str().encode_wide().chain(Some(0)).collect()
}
impl FileAttr {
pub fn size(&self) -> u64 {
((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64)
}
pub fn perm(&self) -> FilePermissions {
FilePermissions { attrs: self.data.dwFileAttributes }
}
pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
pub fn file_type(&self) -> FileType {
FileType::new(self.data.dwFileAttributes, self.reparse_tag)
}
pub fn created(&self) -> u64 { self.to_u64(&self.data.ftCreationTime) }
pub fn accessed(&self) -> u64 { self.to_u64(&self.data.ftLastAccessTime) }
pub fn modified(&self) -> u64 { self.to_u64(&self.data.ftLastWriteTime) }
fn to_u64(&self, ft: &libc::FILETIME) -> u64 {
(ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32)
}
fn is_reparse_point(&self) -> bool {
        self.data.dwFileAttributes & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0
}
}
impl FilePermissions {
pub fn readonly(&self) -> bool {
        self.attrs & c::FILE_ATTRIBUTE_READONLY != 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.attrs |= c::FILE_ATTRIBUTE_READONLY;
} else {
            self.attrs &= !c::FILE_ATTRIBUTE_READONLY;
}
}
}
impl FileType {
fn new(attrs: libc::DWORD, reparse_tag: libc::DWORD) -> FileType {
        if attrs & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
match reparse_tag {
c::IO_REPARSE_TAG_SYMLINK => FileType::Symlink,
c::IO_REPARSE_TAG_MOUNT_POINT => FileType::MountPoint,
_ => FileType::ReparsePoint,
}
        } else if attrs & c::FILE_ATTRIBUTE_DIRECTORY != 0 {
FileType::Dir
} else {
FileType::File
}
}
pub fn is_dir(&self) -> bool { *self == FileType::Dir }
pub fn is_file(&self) -> bool { *self == FileType::File }
pub fn is_symlink(&self) -> bool {
*self == FileType::Symlink || *self == FileType::MountPoint
}
}
impl DirBuilder {
pub fn new() -> DirBuilder { DirBuilder }
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe {
libc::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
}));
Ok(())
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = p.to_path_buf();
let star = p.join("*");
let path = to_utf16(&star);
unsafe {
let mut wfd = mem::zeroed();
let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
        if find_handle != libc::INVALID_HANDLE_VALUE {
Ok(ReadDir {
handle: FindNextFileHandle(find_handle),
root: Arc::new(root),
first: Some(wfd),
})
} else {
Err(Error::last_os_error())
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p_utf16 = to_utf16(p);
try!(cvt(unsafe { libc::DeleteFileW(p_utf16.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = to_utf16(old);
let new = to_utf16(new);
try!(cvt(unsafe {
libc::MoveFileExW(old.as_ptr(), new.as_ptr(),
libc::MOVEFILE_REPLACE_EXISTING)
}));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let file = try!(File::open_reparse_point(p, false));
file.readlink()
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
symlink_inner(src, dst, false)
}
pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
try!(cvt(unsafe {
c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
}));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
try!(cvt(unsafe {
libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
}));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let attr = try!(lstat(p));
// If this is a reparse point, then we need to reopen the file to get the
// actual destination. We also pass the FILE_FLAG_BACKUP_SEMANTICS flag to
// ensure that we can open directories (this path may be a directory
// junction). Once the file is opened we ask the opened handle what its
// metadata information is.
if attr.is_reparse_point() {
let mut opts = OpenOptions::new();
opts.flags_and_attributes(c::FILE_FLAG_BACKUP_SEMANTICS);
let file = try!(File::open(p, &opts));
file.file_attr()
} else {
Ok(attr)
}
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let utf16 = to_utf16(p);
unsafe {
let mut attr: FileAttr = mem::zeroed();
try!(cvt(c::GetFileAttributesExW(utf16.as_ptr(),
c::GetFileExInfoStandard,
&mut attr.data as *mut _ as *mut _)));
if attr.is_reparse_point() {
attr.reparse_tag = File::open_reparse_point(p, false).and_then(|f| {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
f.reparse_point(&mut b).map(|(_, b)| b.ReparseTag)
}).unwrap_or(0);
}
Ok(attr)
}
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = to_utf16(p);
unsafe {
try!(cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs)));
Ok(())
}
}
fn get_path(f: &File) -> io::Result<PathBuf> {
super::fill_utf16_buf(|buf, sz| unsafe {
c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
libc::VOLUME_NAME_DOS)
}, |buf| {
PathBuf::from(OsString::from_wide(buf))
})
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let mut opts = OpenOptions::new();
opts.read(true);
let f = try!(File::open(p, &opts));
get_path(&f)
}
pub fn copy(from: &Path, to: &Path) -> io::Result<u64>
|
{
unsafe extern "system" fn callback(
_TotalFileSize: libc::LARGE_INTEGER,
TotalBytesTransferred: libc::LARGE_INTEGER,
_StreamSize: libc::LARGE_INTEGER,
_StreamBytesTransferred: libc::LARGE_INTEGER,
_dwStreamNumber: libc::DWORD,
_dwCallbackReason: libc::DWORD,
_hSourceFile: HANDLE,
_hDestinationFile: HANDLE,
lpData: libc::LPVOID,
) -> libc::DWORD {
*(lpData as *mut i64) = TotalBytesTransferred;
c::PROGRESS_CONTINUE
}
let pfrom = to_utf16(from);
let pto = to_utf16(to);
let mut size = 0i64;
try!(cvt(unsafe {
c::CopyFileExW(pfrom.as_ptr(), pto.as_ptr(), Some(callback),
|
identifier_body
|
|
fs.rs
|
}
}
impl DirEntry {
fn new(root: &Arc<PathBuf>, wfd: &libc::WIN32_FIND_DATAW) -> Option<DirEntry> {
match &wfd.cFileName[0..3] {
// check for '.' and '..'
[46, 0,..] |
[46, 46, 0,..] => return None,
_ => {}
}
Some(DirEntry {
root: root.clone(),
data: *wfd,
})
}
pub fn path(&self) -> PathBuf {
self.root.join(&self.file_name())
}
pub fn file_name(&self) -> OsString {
let filename = super::truncate_utf16_at_nul(&self.data.cFileName);
OsString::from_wide(filename)
}
pub fn file_type(&self) -> io::Result<FileType> {
Ok(FileType::new(self.data.dwFileAttributes,
/* reparse_tag = */ self.data.dwReserved0))
}
pub fn metadata(&self) -> io::Result<FileAttr> {
Ok(FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: self.data.dwFileAttributes,
ftCreationTime: self.data.ftCreationTime,
ftLastAccessTime: self.data.ftLastAccessTime,
ftLastWriteTime: self.data.ftLastWriteTime,
nFileSizeHigh: self.data.nFileSizeHigh,
nFileSizeLow: self.data.nFileSizeLow,
},
reparse_tag: self.data.dwReserved0,
})
}
}
impl OpenOptions {
pub fn new() -> OpenOptions { Default::default() }
pub fn read(&mut self, read: bool) { self.read = read; }
pub fn write(&mut self, write: bool) { self.write = write; }
pub fn append(&mut self, append: bool) { self.append = append; }
pub fn create(&mut self, create: bool) { self.create = create; }
pub fn truncate(&mut self, truncate: bool) { self.truncate = truncate; }
pub fn creation_disposition(&mut self, val: u32) {
self.creation_disposition = Some(val);
}
pub fn flags_and_attributes(&mut self, val: u32) {
self.flags_and_attributes = Some(val);
}
pub fn desired_access(&mut self, val: u32) {
self.desired_access = Some(val);
}
pub fn share_mode(&mut self, val: u32) {
self.share_mode = Some(val);
}
pub fn security_attributes(&mut self, attrs: libc::LPSECURITY_ATTRIBUTES) {
self.security_attributes = attrs as usize;
}
fn get_desired_access(&self) -> libc::DWORD {
self.desired_access.unwrap_or({
let mut base = if self.read {libc::FILE_GENERIC_READ} else {0} |
if self.write {libc::FILE_GENERIC_WRITE} else {0};
if self.append {
                base &= !libc::FILE_WRITE_DATA;
base |= libc::FILE_APPEND_DATA;
}
base
})
}
fn get_share_mode(&self) -> libc::DWORD {
// libuv has a good comment about this, but the basic idea is that
// we try to emulate unix semantics by enabling all sharing by
// allowing things such as deleting a file while it's still open.
self.share_mode.unwrap_or(libc::FILE_SHARE_READ |
libc::FILE_SHARE_WRITE |
libc::FILE_SHARE_DELETE)
}
fn get_creation_disposition(&self) -> libc::DWORD {
self.creation_disposition.unwrap_or({
match (self.create, self.truncate) {
(true, true) => libc::CREATE_ALWAYS,
(true, false) => libc::OPEN_ALWAYS,
(false, false) => libc::OPEN_EXISTING,
(false, true) => {
                    if self.write && !self.append {
libc::CREATE_ALWAYS
} else {
libc::TRUNCATE_EXISTING
}
}
}
})
}
fn get_flags_and_attributes(&self) -> libc::DWORD {
self.flags_and_attributes.unwrap_or(libc::FILE_ATTRIBUTE_NORMAL)
}
}
impl File {
fn open_reparse_point(path: &Path, write: bool) -> io::Result<File> {
let mut opts = OpenOptions::new();
opts.read(!write);
opts.write(write);
opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT |
c::FILE_FLAG_BACKUP_SEMANTICS);
File::open(path, &opts)
}
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = to_utf16(path);
let handle = unsafe {
libc::CreateFileW(path.as_ptr(),
opts.get_desired_access(),
opts.get_share_mode(),
opts.security_attributes as *mut _,
opts.get_creation_disposition(),
opts.get_flags_and_attributes(),
ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
Err(Error::last_os_error())
} else {
Ok(File { handle: Handle::new(handle) })
}
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt(unsafe { libc::FlushFileBuffers(self.handle.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> { self.fsync() }
pub fn truncate(&self, size: u64) -> io::Result<()> {
let mut info = c::FILE_END_OF_FILE_INFO {
EndOfFile: size as libc::LARGE_INTEGER,
};
let size = mem::size_of_val(&info);
try!(cvt(unsafe {
c::SetFileInformationByHandle(self.handle.raw(),
c::FileEndOfFileInfo,
&mut info as *mut _ as *mut _,
size as libc::DWORD)
}));
Ok(())
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
unsafe {
let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
&mut info)));
let mut attr = FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: info.dwFileAttributes,
ftCreationTime: info.ftCreationTime,
ftLastAccessTime: info.ftLastAccessTime,
ftLastWriteTime: info.ftLastWriteTime,
nFileSizeHigh: info.nFileSizeHigh,
nFileSizeLow: info.nFileSizeLow,
},
reparse_tag: 0,
};
if attr.is_reparse_point()
|
Ok(attr)
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.handle.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.handle.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(n) => (libc::FILE_BEGIN, n as i64),
SeekFrom::End(n) => (libc::FILE_END, n),
SeekFrom::Current(n) => (libc::FILE_CURRENT, n),
};
let pos = pos as libc::LARGE_INTEGER;
let mut newpos = 0;
try!(cvt(unsafe {
libc::SetFilePointerEx(self.handle.raw(), pos,
&mut newpos, whence)
}));
Ok(newpos as u64)
}
pub fn handle(&self) -> &Handle { &self.handle }
pub fn into_handle(self) -> Handle { self.handle }
fn reparse_point<'a>(&self,
space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE])
-> io::Result<(libc::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
try!(cvt({
c::DeviceIoControl(self.handle.raw(),
c::FSCTL_GET_REPARSE_POINT,
0 as *mut _,
0,
space.as_mut_ptr() as *mut _,
space.len() as libc::DWORD,
&mut bytes,
0 as *mut _)
}));
Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
let (_bytes, buf) = try!(self.reparse_point(&mut space));
        if buf.ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
}
unsafe {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
&buf.rest as *const _ as *const _;
let path_buffer = &(*info).PathBuffer as *const _ as *const u16;
let subst_off = (*info).SubstituteNameOffset / 2;
let subst_ptr = path_buffer.offset(subst_off as isize);
let subst_len = (*info).SubstituteNameLength / 2;
let subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
Ok(PathBuf::from(OsString::from_wide(subst)))
}
}
}
impl FromInner<libc::HANDLE> for File {
fn from_inner(handle: libc::HANDLE) -> File {
File { handle: Handle::new(handle) }
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// FIXME(#24570): add more info here (e.g. mode)
let mut b = f.debug_struct("File");
b.field("handle", &self.handle.raw());
if let Ok(path) = get_path(&self) {
b.field("path", &path);
}
b.finish()
}
}
pub fn to_utf16(s: &Path) -> Vec<u16> {
s.as_os_str().encode_wide().chain(Some(0)).collect()
}
impl FileAttr {
pub fn size(&self) -> u64 {
((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64)
}
pub fn perm(&self) -> FilePermissions {
FilePermissions { attrs: self.data.dwFileAttributes }
}
pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
pub fn file_type(&self) -> FileType {
FileType::new(self.data.dwFileAttributes, self.reparse_tag)
}
pub fn created(&self) -> u64 { self.to_u64(&self.data.ftCreationTime) }
pub fn accessed(&self) -> u64 { self.to_u64(&self.data.ftLastAccessTime) }
pub fn modified(&self) -> u64 { self.to_u64(&self.data.ftLastWriteTime) }
fn to_u64(&self, ft: &libc::FILETIME) -> u64 {
(ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32)
}
fn is_reparse_point(&self) -> bool {
        self.data.dwFileAttributes & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0
}
}
impl FilePermissions {
pub fn readonly(&self) -> bool {
        self.attrs & c::FILE_ATTRIBUTE_READONLY != 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.attrs |= c::FILE_ATTRIBUTE_READONLY;
} else {
            self.attrs &= !c::FILE_ATTRIBUTE_READONLY;
}
}
}
impl FileType {
fn new(attrs: libc::DWORD, reparse_tag: libc::DWORD) -> FileType {
        if attrs & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
match reparse_tag {
c::IO_REPARSE_TAG_SYMLINK => FileType::Symlink,
c::IO_REPARSE_TAG_MOUNT_POINT => FileType::MountPoint,
_ => FileType::ReparsePoint,
}
        } else if attrs & c::FILE_ATTRIBUTE_DIRECTORY != 0 {
FileType::Dir
} else {
FileType::File
}
}
pub fn is_dir(&self) -> bool { *self == FileType::Dir }
pub fn is_file(&self) -> bool { *self == FileType::File }
pub fn is_symlink(&self) -> bool {
*self == FileType::Symlink || *self == FileType::MountPoint
}
}
impl DirBuilder {
pub fn new() -> DirBuilder { DirBuilder }
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe {
libc::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
}));
Ok(())
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = p.to_path_buf();
let star = p.join("*");
let path = to_utf16(&star);
unsafe {
let mut wfd = mem::zeroed();
let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
        if find_handle != libc::INVALID_HANDLE_VALUE {
Ok(ReadDir {
handle: FindNextFileHandle(find_handle),
root: Arc::new(root),
first: Some(wfd),
})
} else {
Err(Error::last_os_error())
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p_utf16 = to_utf16(p);
try!(cvt(unsafe { libc::DeleteFileW(p_utf16.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = to_utf16(old);
let new = to_utf16(new);
try!(cvt(unsafe {
libc::MoveFileExW(old.as_ptr(), new.as_ptr(),
libc::MOVEFILE_REPLACE_EXISTING)
}));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let file = try!(File::open_reparse_point(p, false));
file.readlink()
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
symlink_inner(src, dst, false)
}
pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
try!(cvt(unsafe {
c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
}));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
try!(cvt(unsafe {
libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
}));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let attr = try!(lstat(p));
// If this is a reparse point, then we need to reopen the file to get the
// actual destination. We also pass the FILE_FLAG_BACKUP_SEMANTICS flag to
// ensure that we can open directories (this path may be a directory
// junction). Once the file is opened we ask the opened handle what its
// metadata information is.
if attr.is_reparse_point() {
let mut opts = OpenOptions::new();
opts.flags_and_attributes(c::FILE_FLAG_BACKUP_SEMANTICS);
let file = try!(File::open(p, &opts));
file.file_attr()
} else {
Ok(attr)
}
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let utf16 = to_utf16(p);
unsafe {
let mut attr: FileAttr = mem::zeroed();
try!(cvt(c::GetFileAttributesExW(utf16.as_ptr(),
c::GetFileExInfoStandard,
&mut attr.data as *mut _ as *mut _)));
if attr.is_reparse_point() {
attr.reparse_tag = File::open_reparse_point(p, false).and_then(|f| {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
f.reparse_point(&mut b).map(|(_, b)| b.ReparseTag)
}).unwrap_or(0);
}
Ok(attr)
}
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = to_utf16(p);
unsafe {
try!(cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs)));
Ok(())
}
}
fn get_path(f: &File) -> io::Result<PathBuf> {
super::fill_utf16_buf(|buf, sz| unsafe {
c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
libc::VOLUME_NAME_DOS)
}, |buf| {
PathBuf::from(OsString::from_wide(buf))
})
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let mut opts = OpenOptions::new();
opts.read(true);
let f = try!(File::open(p, &opts));
get_path(&f)
}
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
unsafe extern "system" fn callback(
_TotalFileSize: libc::LARGE_INTEGER,
TotalBytesTransferred: libc::LARGE_INTEGER,
_StreamSize: libc::LARGE_INTEGER,
_StreamBytesTransferred: libc::LARGE_INTEGER,
_dwStreamNumber: libc::DWORD,
_dwCallbackReason: libc::DWORD,
_hSourceFile: HANDLE,
_hDestinationFile: HANDLE,
lpData: libc::LPVOID,
) -> libc::DWORD {
|
{
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
if let Ok((_, buf)) = self.reparse_point(&mut b) {
attr.reparse_tag = buf.ReparseTag;
}
}
|
conditional_block
|
fs.rs
|
(&self) -> libc::DWORD {
self.flags_and_attributes.unwrap_or(libc::FILE_ATTRIBUTE_NORMAL)
}
}
impl File {
fn open_reparse_point(path: &Path, write: bool) -> io::Result<File> {
let mut opts = OpenOptions::new();
opts.read(!write);
opts.write(write);
opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT |
c::FILE_FLAG_BACKUP_SEMANTICS);
File::open(path, &opts)
}
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = to_utf16(path);
let handle = unsafe {
libc::CreateFileW(path.as_ptr(),
opts.get_desired_access(),
opts.get_share_mode(),
opts.security_attributes as *mut _,
opts.get_creation_disposition(),
opts.get_flags_and_attributes(),
ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
Err(Error::last_os_error())
} else {
Ok(File { handle: Handle::new(handle) })
}
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt(unsafe { libc::FlushFileBuffers(self.handle.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> { self.fsync() }
pub fn truncate(&self, size: u64) -> io::Result<()> {
let mut info = c::FILE_END_OF_FILE_INFO {
EndOfFile: size as libc::LARGE_INTEGER,
};
let size = mem::size_of_val(&info);
try!(cvt(unsafe {
c::SetFileInformationByHandle(self.handle.raw(),
c::FileEndOfFileInfo,
&mut info as *mut _ as *mut _,
size as libc::DWORD)
}));
Ok(())
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
unsafe {
let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
&mut info)));
let mut attr = FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: info.dwFileAttributes,
ftCreationTime: info.ftCreationTime,
ftLastAccessTime: info.ftLastAccessTime,
ftLastWriteTime: info.ftLastWriteTime,
nFileSizeHigh: info.nFileSizeHigh,
nFileSizeLow: info.nFileSizeLow,
},
reparse_tag: 0,
};
if attr.is_reparse_point() {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
if let Ok((_, buf)) = self.reparse_point(&mut b) {
attr.reparse_tag = buf.ReparseTag;
}
}
Ok(attr)
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.handle.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.handle.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(n) => (libc::FILE_BEGIN, n as i64),
SeekFrom::End(n) => (libc::FILE_END, n),
SeekFrom::Current(n) => (libc::FILE_CURRENT, n),
};
let pos = pos as libc::LARGE_INTEGER;
let mut newpos = 0;
try!(cvt(unsafe {
libc::SetFilePointerEx(self.handle.raw(), pos,
&mut newpos, whence)
}));
Ok(newpos as u64)
}
pub fn handle(&self) -> &Handle { &self.handle }
pub fn into_handle(self) -> Handle { self.handle }
fn reparse_point<'a>(&self,
space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE])
-> io::Result<(libc::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
try!(cvt({
c::DeviceIoControl(self.handle.raw(),
c::FSCTL_GET_REPARSE_POINT,
0 as *mut _,
0,
space.as_mut_ptr() as *mut _,
space.len() as libc::DWORD,
&mut bytes,
0 as *mut _)
}));
Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
let (_bytes, buf) = try!(self.reparse_point(&mut space));
        if buf.ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
}
unsafe {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
&buf.rest as *const _ as *const _;
let path_buffer = &(*info).PathBuffer as *const _ as *const u16;
let subst_off = (*info).SubstituteNameOffset / 2;
let subst_ptr = path_buffer.offset(subst_off as isize);
let subst_len = (*info).SubstituteNameLength / 2;
let subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
Ok(PathBuf::from(OsString::from_wide(subst)))
}
}
}
impl FromInner<libc::HANDLE> for File {
fn from_inner(handle: libc::HANDLE) -> File {
File { handle: Handle::new(handle) }
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// FIXME(#24570): add more info here (e.g. mode)
let mut b = f.debug_struct("File");
b.field("handle", &self.handle.raw());
if let Ok(path) = get_path(&self) {
b.field("path", &path);
}
b.finish()
}
}
pub fn to_utf16(s: &Path) -> Vec<u16> {
s.as_os_str().encode_wide().chain(Some(0)).collect()
}
impl FileAttr {
pub fn size(&self) -> u64 {
((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64)
}
pub fn perm(&self) -> FilePermissions {
FilePermissions { attrs: self.data.dwFileAttributes }
}
pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
pub fn file_type(&self) -> FileType {
FileType::new(self.data.dwFileAttributes, self.reparse_tag)
}
pub fn created(&self) -> u64 { self.to_u64(&self.data.ftCreationTime) }
pub fn accessed(&self) -> u64 { self.to_u64(&self.data.ftLastAccessTime) }
pub fn modified(&self) -> u64 { self.to_u64(&self.data.ftLastWriteTime) }
fn to_u64(&self, ft: &libc::FILETIME) -> u64 {
(ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32)
}
fn is_reparse_point(&self) -> bool {
        self.data.dwFileAttributes & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0
}
}
impl FilePermissions {
pub fn readonly(&self) -> bool {
        self.attrs & c::FILE_ATTRIBUTE_READONLY != 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.attrs |= c::FILE_ATTRIBUTE_READONLY;
} else {
            self.attrs &= !c::FILE_ATTRIBUTE_READONLY;
}
}
}
impl FileType {
fn new(attrs: libc::DWORD, reparse_tag: libc::DWORD) -> FileType {
        if attrs & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
match reparse_tag {
c::IO_REPARSE_TAG_SYMLINK => FileType::Symlink,
c::IO_REPARSE_TAG_MOUNT_POINT => FileType::MountPoint,
_ => FileType::ReparsePoint,
}
        } else if attrs & c::FILE_ATTRIBUTE_DIRECTORY != 0 {
FileType::Dir
} else {
FileType::File
}
}
pub fn is_dir(&self) -> bool { *self == FileType::Dir }
pub fn is_file(&self) -> bool { *self == FileType::File }
pub fn is_symlink(&self) -> bool {
*self == FileType::Symlink || *self == FileType::MountPoint
}
}
impl DirBuilder {
pub fn new() -> DirBuilder { DirBuilder }
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe {
libc::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
}));
Ok(())
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = p.to_path_buf();
let star = p.join("*");
let path = to_utf16(&star);
unsafe {
let mut wfd = mem::zeroed();
let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
        if find_handle != libc::INVALID_HANDLE_VALUE {
Ok(ReadDir {
handle: FindNextFileHandle(find_handle),
root: Arc::new(root),
first: Some(wfd),
})
} else {
Err(Error::last_os_error())
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p_utf16 = to_utf16(p);
try!(cvt(unsafe { libc::DeleteFileW(p_utf16.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = to_utf16(old);
let new = to_utf16(new);
try!(cvt(unsafe {
libc::MoveFileExW(old.as_ptr(), new.as_ptr(),
libc::MOVEFILE_REPLACE_EXISTING)
}));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let file = try!(File::open_reparse_point(p, false));
file.readlink()
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
symlink_inner(src, dst, false)
}
pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
try!(cvt(unsafe {
c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
}));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
try!(cvt(unsafe {
libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
}));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let attr = try!(lstat(p));
// If this is a reparse point, then we need to reopen the file to get the
// actual destination. We also pass the FILE_FLAG_BACKUP_SEMANTICS flag to
// ensure that we can open directories (this path may be a directory
// junction). Once the file is opened we ask the opened handle what its
// metadata information is.
if attr.is_reparse_point() {
let mut opts = OpenOptions::new();
opts.flags_and_attributes(c::FILE_FLAG_BACKUP_SEMANTICS);
let file = try!(File::open(p, &opts));
file.file_attr()
} else {
Ok(attr)
}
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let utf16 = to_utf16(p);
unsafe {
let mut attr: FileAttr = mem::zeroed();
try!(cvt(c::GetFileAttributesExW(utf16.as_ptr(),
c::GetFileExInfoStandard,
&mut attr.data as *mut _ as *mut _)));
if attr.is_reparse_point() {
attr.reparse_tag = File::open_reparse_point(p, false).and_then(|f| {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
f.reparse_point(&mut b).map(|(_, b)| b.ReparseTag)
}).unwrap_or(0);
}
Ok(attr)
}
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = to_utf16(p);
unsafe {
try!(cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs)));
Ok(())
}
}
fn get_path(f: &File) -> io::Result<PathBuf> {
super::fill_utf16_buf(|buf, sz| unsafe {
c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
libc::VOLUME_NAME_DOS)
}, |buf| {
PathBuf::from(OsString::from_wide(buf))
})
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let mut opts = OpenOptions::new();
opts.read(true);
let f = try!(File::open(p, &opts));
get_path(&f)
}
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
unsafe extern "system" fn callback(
_TotalFileSize: libc::LARGE_INTEGER,
TotalBytesTransferred: libc::LARGE_INTEGER,
_StreamSize: libc::LARGE_INTEGER,
_StreamBytesTransferred: libc::LARGE_INTEGER,
_dwStreamNumber: libc::DWORD,
_dwCallbackReason: libc::DWORD,
_hSourceFile: HANDLE,
_hDestinationFile: HANDLE,
lpData: libc::LPVOID,
) -> libc::DWORD {
*(lpData as *mut i64) = TotalBytesTransferred;
c::PROGRESS_CONTINUE
}
let pfrom = to_utf16(from);
let pto = to_utf16(to);
let mut size = 0i64;
try!(cvt(unsafe {
c::CopyFileExW(pfrom.as_ptr(), pto.as_ptr(), Some(callback),
&mut size as *mut _ as *mut _, ptr::null_mut(), 0)
}));
Ok(size as u64)
}
#[test]
fn directory_junctions_are_directories() {
use ffi::OsStr;
use env;
use rand::{self, StdRng, Rng};
macro_rules! t {
($e:expr) => (match $e {
Ok(e) => e,
Err(e) => panic!("{} failed with: {}", stringify!($e), e),
})
}
let d = DirBuilder::new();
let p = env::temp_dir();
let mut r = rand::thread_rng();
let ret = p.join(&format!("rust-{}", r.next_u32()));
let foo = ret.join("foo");
let bar = ret.join("bar");
t!(d.mkdir(&ret));
t!(d.mkdir(&foo));
t!(d.mkdir(&bar));
t!(create_junction(&bar, &foo));
let metadata = stat(&bar);
t!(delete_junction(&bar));
t!(rmdir(&foo));
t!(rmdir(&bar));
t!(rmdir(&ret));
let metadata = t!(metadata);
assert!(metadata.file_type().is_dir());
// Creating a directory junction on windows involves dealing with reparse
// points and the DeviceIoControl function, and this code is a skeleton of
// what can be found here:
//
// http://www.flexhex.com/docs/articles/hard-links.phtml
fn create_junction(src: &Path, dst: &Path) -> io::Result<()> {
let f = try!(opendir(src, true));
let h = f.handle().raw();
unsafe {
let mut data = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
let mut db = data.as_mut_ptr()
as *mut c::REPARSE_MOUNTPOINT_DATA_BUFFER;
let mut buf = &mut (*db).ReparseTarget as *mut _;
let mut i = 0;
let v = br"\??\";
let v = v.iter().map(|x| *x as u16);
for c in v.chain(dst.as_os_str().encode_wide()) {
*buf.offset(i) = c;
i += 1;
}
*buf.offset(i) = 0;
i += 1;
(*db).ReparseTag = c::IO_REPARSE_TAG_MOUNT_POINT;
(*db).ReparseTargetMaximumLength = (i * 2) as libc::WORD;
(*db).ReparseTargetLength = ((i - 1) * 2) as libc::WORD;
(*db).ReparseDataLength =
(*db).ReparseTargetLength as libc::DWORD + 12;
let mut ret = 0;
cvt(c::DeviceIoControl(h as *mut _,
c::FSCTL_SET_REPARSE_POINT,
data.as_ptr() as *mut _,
(*db).ReparseDataLength + 8,
0 as *mut _, 0,
&mut ret,
0 as *mut _)).map(|_| ())
}
}
fn opendir(p: &Path, write: bool) -> io::Result<File> {
unsafe {
let mut token = 0 as *mut _;
let mut tp: c::TOKEN_PRIVILEGES = mem::zeroed();
try!(cvt(c::OpenProcessToken(c::GetCurrentProcess(),
c::TOKEN_ADJUST_PRIVILEGES,
&mut token)));
let name: &OsStr = if write {
"SeRestorePrivilege".as_ref()
|
} else {
"SeBackupPrivilege".as_ref()
};
let name = name.encode_wide().chain(Some(0)).collect::<Vec<_>>();
|
random_line_split
|
|
fs.rs
|
}
}
impl DirEntry {
fn new(root: &Arc<PathBuf>, wfd: &libc::WIN32_FIND_DATAW) -> Option<DirEntry> {
match &wfd.cFileName[0..3] {
// check for '.' and '..'
            [46, 0, ..] |
            [46, 46, 0, ..] => return None,
_ => {}
}
Some(DirEntry {
root: root.clone(),
data: *wfd,
})
}
pub fn path(&self) -> PathBuf {
self.root.join(&self.file_name())
}
pub fn file_name(&self) -> OsString {
let filename = super::truncate_utf16_at_nul(&self.data.cFileName);
OsString::from_wide(filename)
}
pub fn file_type(&self) -> io::Result<FileType> {
Ok(FileType::new(self.data.dwFileAttributes,
/* reparse_tag = */ self.data.dwReserved0))
}
pub fn metadata(&self) -> io::Result<FileAttr> {
Ok(FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: self.data.dwFileAttributes,
ftCreationTime: self.data.ftCreationTime,
ftLastAccessTime: self.data.ftLastAccessTime,
ftLastWriteTime: self.data.ftLastWriteTime,
nFileSizeHigh: self.data.nFileSizeHigh,
nFileSizeLow: self.data.nFileSizeLow,
},
reparse_tag: self.data.dwReserved0,
})
}
}
impl OpenOptions {
pub fn new() -> OpenOptions { Default::default() }
pub fn read(&mut self, read: bool) { self.read = read; }
pub fn write(&mut self, write: bool) { self.write = write; }
pub fn append(&mut self, append: bool) { self.append = append; }
pub fn create(&mut self, create: bool) { self.create = create; }
pub fn truncate(&mut self, truncate: bool) { self.truncate = truncate; }
pub fn creation_disposition(&mut self, val: u32) {
self.creation_disposition = Some(val);
}
pub fn flags_and_attributes(&mut self, val: u32) {
self.flags_and_attributes = Some(val);
}
pub fn desired_access(&mut self, val: u32) {
self.desired_access = Some(val);
}
pub fn share_mode(&mut self, val: u32) {
self.share_mode = Some(val);
}
pub fn security_attributes(&mut self, attrs: libc::LPSECURITY_ATTRIBUTES) {
self.security_attributes = attrs as usize;
}
fn get_desired_access(&self) -> libc::DWORD {
self.desired_access.unwrap_or({
let mut base = if self.read {libc::FILE_GENERIC_READ} else {0} |
if self.write {libc::FILE_GENERIC_WRITE} else {0};
if self.append {
                base &= !libc::FILE_WRITE_DATA;
base |= libc::FILE_APPEND_DATA;
}
base
})
}
fn get_share_mode(&self) -> libc::DWORD {
// libuv has a good comment about this, but the basic idea is that
// we try to emulate unix semantics by enabling all sharing by
// allowing things such as deleting a file while it's still open.
self.share_mode.unwrap_or(libc::FILE_SHARE_READ |
libc::FILE_SHARE_WRITE |
libc::FILE_SHARE_DELETE)
}
fn get_creation_disposition(&self) -> libc::DWORD {
self.creation_disposition.unwrap_or({
match (self.create, self.truncate) {
(true, true) => libc::CREATE_ALWAYS,
(true, false) => libc::OPEN_ALWAYS,
(false, false) => libc::OPEN_EXISTING,
(false, true) => {
                    if self.write && !self.append {
libc::CREATE_ALWAYS
} else {
libc::TRUNCATE_EXISTING
}
}
}
})
}
fn get_flags_and_attributes(&self) -> libc::DWORD {
self.flags_and_attributes.unwrap_or(libc::FILE_ATTRIBUTE_NORMAL)
}
}
impl File {
fn open_reparse_point(path: &Path, write: bool) -> io::Result<File> {
let mut opts = OpenOptions::new();
opts.read(!write);
opts.write(write);
opts.flags_and_attributes(c::FILE_FLAG_OPEN_REPARSE_POINT |
c::FILE_FLAG_BACKUP_SEMANTICS);
File::open(path, &opts)
}
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
let path = to_utf16(path);
let handle = unsafe {
libc::CreateFileW(path.as_ptr(),
opts.get_desired_access(),
opts.get_share_mode(),
opts.security_attributes as *mut _,
opts.get_creation_disposition(),
opts.get_flags_and_attributes(),
ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
Err(Error::last_os_error())
} else {
Ok(File { handle: Handle::new(handle) })
}
}
pub fn fsync(&self) -> io::Result<()> {
try!(cvt(unsafe { libc::FlushFileBuffers(self.handle.raw()) }));
Ok(())
}
pub fn datasync(&self) -> io::Result<()> { self.fsync() }
pub fn truncate(&self, size: u64) -> io::Result<()> {
let mut info = c::FILE_END_OF_FILE_INFO {
EndOfFile: size as libc::LARGE_INTEGER,
};
let size = mem::size_of_val(&info);
try!(cvt(unsafe {
c::SetFileInformationByHandle(self.handle.raw(),
c::FileEndOfFileInfo,
&mut info as *mut _ as *mut _,
size as libc::DWORD)
}));
Ok(())
}
pub fn file_attr(&self) -> io::Result<FileAttr> {
unsafe {
let mut info: c::BY_HANDLE_FILE_INFORMATION = mem::zeroed();
try!(cvt(c::GetFileInformationByHandle(self.handle.raw(),
&mut info)));
let mut attr = FileAttr {
data: c::WIN32_FILE_ATTRIBUTE_DATA {
dwFileAttributes: info.dwFileAttributes,
ftCreationTime: info.ftCreationTime,
ftLastAccessTime: info.ftLastAccessTime,
ftLastWriteTime: info.ftLastWriteTime,
nFileSizeHigh: info.nFileSizeHigh,
nFileSizeLow: info.nFileSizeLow,
},
reparse_tag: 0,
};
if attr.is_reparse_point() {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
if let Ok((_, buf)) = self.reparse_point(&mut b) {
attr.reparse_tag = buf.ReparseTag;
}
}
Ok(attr)
}
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.handle.read(buf)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.handle.write(buf)
}
pub fn flush(&self) -> io::Result<()> { Ok(()) }
pub fn seek(&self, pos: SeekFrom) -> io::Result<u64> {
let (whence, pos) = match pos {
SeekFrom::Start(n) => (libc::FILE_BEGIN, n as i64),
SeekFrom::End(n) => (libc::FILE_END, n),
SeekFrom::Current(n) => (libc::FILE_CURRENT, n),
};
let pos = pos as libc::LARGE_INTEGER;
let mut newpos = 0;
try!(cvt(unsafe {
libc::SetFilePointerEx(self.handle.raw(), pos,
&mut newpos, whence)
}));
Ok(newpos as u64)
}
pub fn handle(&self) -> &Handle { &self.handle }
pub fn into_handle(self) -> Handle { self.handle }
fn reparse_point<'a>(&self,
space: &'a mut [u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE])
-> io::Result<(libc::DWORD, &'a c::REPARSE_DATA_BUFFER)> {
unsafe {
let mut bytes = 0;
try!(cvt({
c::DeviceIoControl(self.handle.raw(),
c::FSCTL_GET_REPARSE_POINT,
0 as *mut _,
0,
space.as_mut_ptr() as *mut _,
space.len() as libc::DWORD,
&mut bytes,
0 as *mut _)
}));
Ok((bytes, &*(space.as_ptr() as *const c::REPARSE_DATA_BUFFER)))
}
}
fn readlink(&self) -> io::Result<PathBuf> {
let mut space = [0u8; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
let (_bytes, buf) = try!(self.reparse_point(&mut space));
        if buf.ReparseTag != c::IO_REPARSE_TAG_SYMLINK {
return Err(io::Error::new(io::ErrorKind::Other, "not a symlink"))
}
unsafe {
let info: *const c::SYMBOLIC_LINK_REPARSE_BUFFER =
&buf.rest as *const _ as *const _;
let path_buffer = &(*info).PathBuffer as *const _ as *const u16;
let subst_off = (*info).SubstituteNameOffset / 2;
let subst_ptr = path_buffer.offset(subst_off as isize);
let subst_len = (*info).SubstituteNameLength / 2;
let subst = slice::from_raw_parts(subst_ptr, subst_len as usize);
Ok(PathBuf::from(OsString::from_wide(subst)))
}
}
}
impl FromInner<libc::HANDLE> for File {
fn from_inner(handle: libc::HANDLE) -> File {
File { handle: Handle::new(handle) }
}
}
impl fmt::Debug for File {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// FIXME(#24570): add more info here (e.g. mode)
let mut b = f.debug_struct("File");
b.field("handle", &self.handle.raw());
if let Ok(path) = get_path(&self) {
b.field("path", &path);
}
b.finish()
}
}
pub fn to_utf16(s: &Path) -> Vec<u16> {
s.as_os_str().encode_wide().chain(Some(0)).collect()
}
impl FileAttr {
pub fn
|
(&self) -> u64 {
((self.data.nFileSizeHigh as u64) << 32) | (self.data.nFileSizeLow as u64)
}
pub fn perm(&self) -> FilePermissions {
FilePermissions { attrs: self.data.dwFileAttributes }
}
pub fn attrs(&self) -> u32 { self.data.dwFileAttributes as u32 }
pub fn file_type(&self) -> FileType {
FileType::new(self.data.dwFileAttributes, self.reparse_tag)
}
pub fn created(&self) -> u64 { self.to_u64(&self.data.ftCreationTime) }
pub fn accessed(&self) -> u64 { self.to_u64(&self.data.ftLastAccessTime) }
pub fn modified(&self) -> u64 { self.to_u64(&self.data.ftLastWriteTime) }
fn to_u64(&self, ft: &libc::FILETIME) -> u64 {
(ft.dwLowDateTime as u64) | ((ft.dwHighDateTime as u64) << 32)
}
fn is_reparse_point(&self) -> bool {
        self.data.dwFileAttributes & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0
}
}
impl FilePermissions {
pub fn readonly(&self) -> bool {
        self.attrs & c::FILE_ATTRIBUTE_READONLY != 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.attrs |= c::FILE_ATTRIBUTE_READONLY;
} else {
            self.attrs &= !c::FILE_ATTRIBUTE_READONLY;
}
}
}
impl FileType {
fn new(attrs: libc::DWORD, reparse_tag: libc::DWORD) -> FileType {
        if attrs & libc::FILE_ATTRIBUTE_REPARSE_POINT != 0 {
match reparse_tag {
c::IO_REPARSE_TAG_SYMLINK => FileType::Symlink,
c::IO_REPARSE_TAG_MOUNT_POINT => FileType::MountPoint,
_ => FileType::ReparsePoint,
}
        } else if attrs & c::FILE_ATTRIBUTE_DIRECTORY != 0 {
FileType::Dir
} else {
FileType::File
}
}
pub fn is_dir(&self) -> bool { *self == FileType::Dir }
pub fn is_file(&self) -> bool { *self == FileType::File }
pub fn is_symlink(&self) -> bool {
*self == FileType::Symlink || *self == FileType::MountPoint
}
}
impl DirBuilder {
pub fn new() -> DirBuilder { DirBuilder }
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe {
libc::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
}));
Ok(())
}
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = p.to_path_buf();
let star = p.join("*");
let path = to_utf16(&star);
unsafe {
let mut wfd = mem::zeroed();
let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
        if find_handle != libc::INVALID_HANDLE_VALUE {
Ok(ReadDir {
handle: FindNextFileHandle(find_handle),
root: Arc::new(root),
first: Some(wfd),
})
} else {
Err(Error::last_os_error())
}
}
}
pub fn unlink(p: &Path) -> io::Result<()> {
let p_utf16 = to_utf16(p);
try!(cvt(unsafe { libc::DeleteFileW(p_utf16.as_ptr()) }));
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
let old = to_utf16(old);
let new = to_utf16(new);
try!(cvt(unsafe {
libc::MoveFileExW(old.as_ptr(), new.as_ptr(),
libc::MOVEFILE_REPLACE_EXISTING)
}));
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
let p = to_utf16(p);
try!(cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) }));
Ok(())
}
pub fn readlink(p: &Path) -> io::Result<PathBuf> {
let file = try!(File::open_reparse_point(p, false));
file.readlink()
}
pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> {
symlink_inner(src, dst, false)
}
pub fn symlink_inner(src: &Path, dst: &Path, dir: bool) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
try!(cvt(unsafe {
c::CreateSymbolicLinkW(dst.as_ptr(), src.as_ptr(), flags) as libc::BOOL
}));
Ok(())
}
pub fn link(src: &Path, dst: &Path) -> io::Result<()> {
let src = to_utf16(src);
let dst = to_utf16(dst);
try!(cvt(unsafe {
libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
}));
Ok(())
}
pub fn stat(p: &Path) -> io::Result<FileAttr> {
let attr = try!(lstat(p));
// If this is a reparse point, then we need to reopen the file to get the
// actual destination. We also pass the FILE_FLAG_BACKUP_SEMANTICS flag to
// ensure that we can open directories (this path may be a directory
// junction). Once the file is opened we ask the opened handle what its
// metadata information is.
if attr.is_reparse_point() {
let mut opts = OpenOptions::new();
opts.flags_and_attributes(c::FILE_FLAG_BACKUP_SEMANTICS);
let file = try!(File::open(p, &opts));
file.file_attr()
} else {
Ok(attr)
}
}
pub fn lstat(p: &Path) -> io::Result<FileAttr> {
let utf16 = to_utf16(p);
unsafe {
let mut attr: FileAttr = mem::zeroed();
try!(cvt(c::GetFileAttributesExW(utf16.as_ptr(),
c::GetFileExInfoStandard,
&mut attr.data as *mut _ as *mut _)));
if attr.is_reparse_point() {
attr.reparse_tag = File::open_reparse_point(p, false).and_then(|f| {
let mut b = [0; c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
f.reparse_point(&mut b).map(|(_, b)| b.ReparseTag)
}).unwrap_or(0);
}
Ok(attr)
}
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
let p = to_utf16(p);
unsafe {
try!(cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs)));
Ok(())
}
}
fn get_path(f: &File) -> io::Result<PathBuf> {
super::fill_utf16_buf(|buf, sz| unsafe {
c::GetFinalPathNameByHandleW(f.handle.raw(), buf, sz,
libc::VOLUME_NAME_DOS)
}, |buf| {
PathBuf::from(OsString::from_wide(buf))
})
}
pub fn canonicalize(p: &Path) -> io::Result<PathBuf> {
let mut opts = OpenOptions::new();
opts.read(true);
let f = try!(File::open(p, &opts));
get_path(&f)
}
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
unsafe extern "system" fn callback(
_TotalFileSize: libc::LARGE_INTEGER,
TotalBytesTransferred: libc::LARGE_INTEGER,
_StreamSize: libc::LARGE_INTEGER,
_StreamBytesTransferred: libc::LARGE_INTEGER,
_dwStreamNumber: libc::DWORD,
_dwCallbackReason: libc::DWORD,
_hSourceFile: HANDLE,
_hDestinationFile: HANDLE,
lpData: libc::LPVOID,
) -> libc::DWORD {
|
size
|
identifier_name
|
reflector.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ast::{ItemKind, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::{Annotatable, ExtCtxt};
use utils::match_ty_unwrap;
pub fn expand_reflector(cx: &mut ExtCtxt, span: Span, _: &MetaItem, annotatable: &Annotatable,
push: &mut FnMut(Annotatable))
|
impl_item.map(|it| push(Annotatable::Item(it)))
},
// Or just call it on the first field (supertype).
None => {
let field_name = def.fields()[0].ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
self.$field_name.reflector()
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.init_reflector(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
}
};
let impl_item = quote_item!(cx,
impl ::js::conversions::ToJSValConvertible for $struct_name {
#[allow(unsafe_code)]
unsafe fn to_jsval(&self,
cx: *mut ::js::jsapi::JSContext,
rval: ::js::jsapi::MutableHandleValue) {
let object = ::dom::bindings::reflector::Reflectable::reflector(self).get_jsobject();
object.to_jsval(cx, rval)
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)));
} else {
cx.span_err(span, "#[dom_struct] seems to have been applied to a non-struct");
}
}
}
|
{
if let &Annotatable::Item(ref item) = annotatable {
if let ItemKind::Struct(ref def, _) = item.node {
let struct_name = item.ident;
// This path has to be hardcoded, unfortunately, since we can't resolve paths at expansion time
match def.fields().iter().find(
|f| match_ty_unwrap(&*f.ty, &["dom", "bindings", "reflector", "Reflector"]).is_some()) {
// If it has a field that is a Reflector, use that
Some(f) => {
let field_name = f.ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
&self.$field_name
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.set_jsobject(obj);
}
}
);
|
identifier_body
|
reflector.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ast::{ItemKind, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::{Annotatable, ExtCtxt};
use utils::match_ty_unwrap;
pub fn
|
(cx: &mut ExtCtxt, span: Span, _: &MetaItem, annotatable: &Annotatable,
push: &mut FnMut(Annotatable)) {
if let &Annotatable::Item(ref item) = annotatable {
if let ItemKind::Struct(ref def, _) = item.node {
let struct_name = item.ident;
// This path has to be hardcoded, unfortunately, since we can't resolve paths at expansion time
match def.fields().iter().find(
|f| match_ty_unwrap(&*f.ty, &["dom", "bindings", "reflector", "Reflector"]).is_some()) {
// If it has a field that is a Reflector, use that
Some(f) => {
let field_name = f.ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
&self.$field_name
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.set_jsobject(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
},
// Or just call it on the first field (supertype).
None => {
let field_name = def.fields()[0].ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
self.$field_name.reflector()
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.init_reflector(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
}
};
let impl_item = quote_item!(cx,
impl ::js::conversions::ToJSValConvertible for $struct_name {
#[allow(unsafe_code)]
unsafe fn to_jsval(&self,
cx: *mut ::js::jsapi::JSContext,
rval: ::js::jsapi::MutableHandleValue) {
let object = ::dom::bindings::reflector::Reflectable::reflector(self).get_jsobject();
object.to_jsval(cx, rval)
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)));
} else {
cx.span_err(span, "#[dom_struct] seems to have been applied to a non-struct");
}
}
}
|
expand_reflector
|
identifier_name
|
reflector.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ast::{ItemKind, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::{Annotatable, ExtCtxt};
use utils::match_ty_unwrap;
pub fn expand_reflector(cx: &mut ExtCtxt, span: Span, _: &MetaItem, annotatable: &Annotatable,
push: &mut FnMut(Annotatable)) {
if let &Annotatable::Item(ref item) = annotatable
|
},
// Or just call it on the first field (supertype).
None => {
let field_name = def.fields()[0].ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
self.$field_name.reflector()
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.init_reflector(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
}
};
let impl_item = quote_item!(cx,
impl ::js::conversions::ToJSValConvertible for $struct_name {
#[allow(unsafe_code)]
unsafe fn to_jsval(&self,
cx: *mut ::js::jsapi::JSContext,
rval: ::js::jsapi::MutableHandleValue) {
let object = ::dom::bindings::reflector::Reflectable::reflector(self).get_jsobject();
object.to_jsval(cx, rval)
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)));
} else {
cx.span_err(span, "#[dom_struct] seems to have been applied to a non-struct");
}
}
}
|
{
if let ItemKind::Struct(ref def, _) = item.node {
let struct_name = item.ident;
// This path has to be hardcoded, unfortunately, since we can't resolve paths at expansion time
match def.fields().iter().find(
|f| match_ty_unwrap(&*f.ty, &["dom", "bindings", "reflector", "Reflector"]).is_some()) {
// If it has a field that is a Reflector, use that
Some(f) => {
let field_name = f.ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
&self.$field_name
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.set_jsobject(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
|
conditional_block
|
reflector.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use syntax::ast::{ItemKind, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::{Annotatable, ExtCtxt};
use utils::match_ty_unwrap;
pub fn expand_reflector(cx: &mut ExtCtxt, span: Span, _: &MetaItem, annotatable: &Annotatable,
push: &mut FnMut(Annotatable)) {
if let &Annotatable::Item(ref item) = annotatable {
if let ItemKind::Struct(ref def, _) = item.node {
let struct_name = item.ident;
// This path has to be hardcoded, unfortunately, since we can't resolve paths at expansion time
match def.fields().iter().find(
|f| match_ty_unwrap(&*f.ty, &["dom", "bindings", "reflector", "Reflector"]).is_some()) {
// If it has a field that is a Reflector, use that
Some(f) => {
let field_name = f.ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
&self.$field_name
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.set_jsobject(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
},
// Or just call it on the first field (supertype).
None => {
let field_name = def.fields()[0].ident;
let impl_item = quote_item!(cx,
impl ::dom::bindings::reflector::Reflectable for $struct_name {
fn reflector<'a>(&'a self) -> &'a ::dom::bindings::reflector::Reflector {
self.$field_name.reflector()
}
fn init_reflector(&mut self, obj: *mut ::js::jsapi::JSObject) {
self.$field_name.init_reflector(obj);
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)))
}
};
let impl_item = quote_item!(cx,
impl ::js::conversions::ToJSValConvertible for $struct_name {
#[allow(unsafe_code)]
unsafe fn to_jsval(&self,
cx: *mut ::js::jsapi::JSContext,
rval: ::js::jsapi::MutableHandleValue) {
let object = ::dom::bindings::reflector::Reflectable::reflector(self).get_jsobject();
object.to_jsval(cx, rval)
}
}
);
impl_item.map(|it| push(Annotatable::Item(it)));
|
}
}
}
|
} else {
cx.span_err(span, "#[dom_struct] seems to have been applied to a non-struct");
|
random_line_split
|
terminal.rs
|
//! Support for terminal symbols
use crate::status::Result;
use crate::status::Status;
|
pub enum Terminal {
/// Literal string
Literal(String),
///// Character matches a list of chars or a list of ranges
//Match(MatchRules),
///// Indicates an error.
///// It will propagate an error while processing
//Expected(String),
/// Any char
Dot,
/// End Of File
Eof,
}
pub(crate) fn parse<'a>(status: Status<'a>, terminal: &Terminal) -> Result<'a> {
match terminal {
Terminal::Eof => parse_eof(status),
Terminal::Literal(l) => parse_literal(status, l),
Terminal::Dot => parse_dot(status),
}
}
fn parse_eof<'a>(status: Status<'a>) -> Result<'a> {
match status.get_char() {
Ok((st, _ch)) => Err(st.to_error("not end of file :-(")),
Err(st) => Ok(st),
}
}
fn parse_literal<'a>(mut status: Status<'a>, literal: &str) -> Result<'a> {
for ch in literal.chars() {
status = parse_char(status, ch).map_err(|st| st.to_error(&format!("'{}'", literal)))?;
}
Ok(status)
}
fn parse_dot<'a>(status: Status<'a>) -> Result<'a> {
let (status, _ch) = status.get_char().map_err(|st| st.to_error("any char"))?;
Ok(status)
}
fn parse_char(status: Status, ch: char) -> std::result::Result<Status, Status> {
let (st, got_ch) = status.get_char()?;
if ch == got_ch {
Ok(st)
} else {
Err(st)
}
}
impl<'a> Status<'a> {
fn get_char(mut self) -> std::result::Result<(Self, char), Self> {
match self.it_parsing.next() {
None => Err(self),
Some(ch) => {
self.pos.n += 1;
match ch {
'\n' => {
self.pos.col = 0;
self.pos.row += 1;
self.pos.start_line = self.pos.n;
}
'\r' => {
self.pos.col = 0;
}
_ => {
self.pos.col += 1;
}
}
Ok((self, ch))
}
}
}
}
|
/// This is a simple expression with no dependencies with other rules
#[derive(Debug, PartialEq, Clone)]
|
random_line_split
|
terminal.rs
|
//! Support for terminal symbols
use crate::status::Result;
use crate::status::Status;
/// This is a simple expression with no dependencies with other rules
#[derive(Debug, PartialEq, Clone)]
pub enum Terminal {
/// Literal string
Literal(String),
///// Character matches a list of chars or a list of ranges
//Match(MatchRules),
///// Indicates an error.
///// It will propagate an error while processing
//Expected(String),
/// Any char
Dot,
/// End Of File
Eof,
}
pub(crate) fn parse<'a>(status: Status<'a>, terminal: &Terminal) -> Result<'a>
|
fn parse_eof<'a>(status: Status<'a>) -> Result<'a> {
match status.get_char() {
Ok((st, _ch)) => Err(st.to_error("not end of file :-(")),
Err(st) => Ok(st),
}
}
fn parse_literal<'a>(mut status: Status<'a>, literal: &str) -> Result<'a> {
for ch in literal.chars() {
status = parse_char(status, ch).map_err(|st| st.to_error(&format!("'{}'", literal)))?;
}
Ok(status)
}
fn parse_dot<'a>(status: Status<'a>) -> Result<'a> {
let (status, _ch) = status.get_char().map_err(|st| st.to_error("any char"))?;
Ok(status)
}
fn parse_char(status: Status, ch: char) -> std::result::Result<Status, Status> {
let (st, got_ch) = status.get_char()?;
if ch == got_ch {
Ok(st)
} else {
Err(st)
}
}
impl<'a> Status<'a> {
fn get_char(mut self) -> std::result::Result<(Self, char), Self> {
match self.it_parsing.next() {
None => Err(self),
Some(ch) => {
self.pos.n += 1;
match ch {
'\n' => {
self.pos.col = 0;
self.pos.row += 1;
self.pos.start_line = self.pos.n;
}
'\r' => {
self.pos.col = 0;
}
_ => {
self.pos.col += 1;
}
}
Ok((self, ch))
}
}
}
}
|
{
match terminal {
Terminal::Eof => parse_eof(status),
Terminal::Literal(l) => parse_literal(status, l),
Terminal::Dot => parse_dot(status),
}
}
|
identifier_body
|
terminal.rs
|
//! Support for terminal symbols
use crate::status::Result;
use crate::status::Status;
/// This is a simple expression with no dependencies with other rules
#[derive(Debug, PartialEq, Clone)]
pub enum Terminal {
/// Literal string
Literal(String),
///// Character matches a list of chars or a list of ranges
//Match(MatchRules),
///// Indicates an error.
///// It will propagate an error while processing
//Expected(String),
/// Any char
Dot,
/// End Of File
Eof,
}
pub(crate) fn parse<'a>(status: Status<'a>, terminal: &Terminal) -> Result<'a> {
match terminal {
Terminal::Eof => parse_eof(status),
Terminal::Literal(l) => parse_literal(status, l),
Terminal::Dot => parse_dot(status),
}
}
fn parse_eof<'a>(status: Status<'a>) -> Result<'a> {
match status.get_char() {
Ok((st, _ch)) => Err(st.to_error("not end of file :-(")),
Err(st) => Ok(st),
}
}
fn
|
<'a>(mut status: Status<'a>, literal: &str) -> Result<'a> {
for ch in literal.chars() {
status = parse_char(status, ch).map_err(|st| st.to_error(&format!("'{}'", literal)))?;
}
Ok(status)
}
fn parse_dot<'a>(status: Status<'a>) -> Result<'a> {
let (status, _ch) = status.get_char().map_err(|st| st.to_error("any char"))?;
Ok(status)
}
fn parse_char(status: Status, ch: char) -> std::result::Result<Status, Status> {
let (st, got_ch) = status.get_char()?;
if ch == got_ch {
Ok(st)
} else {
Err(st)
}
}
impl<'a> Status<'a> {
fn get_char(mut self) -> std::result::Result<(Self, char), Self> {
match self.it_parsing.next() {
None => Err(self),
Some(ch) => {
self.pos.n += 1;
match ch {
'\n' => {
self.pos.col = 0;
self.pos.row += 1;
self.pos.start_line = self.pos.n;
}
'\r' => {
self.pos.col = 0;
}
_ => {
self.pos.col += 1;
}
}
Ok((self, ch))
}
}
}
}
|
parse_literal
|
identifier_name
|
mvp.rs
|
// taken from vecmath & cam (MIT)
// https://github.com/PistonDevelopers/vecmath
// https://github.com/PistonDevelopers/cam
pub type Vector4 = [f32; 4];
pub type Matrix4 = [[f32; 4]; 4];
pub fn model_view_projection(model: Matrix4, view: Matrix4, projection: Matrix4) -> Matrix4 {
col_mat4_mul(col_mat4_mul(projection, view), model)
}
fn col_mat4_mul(a: Matrix4, b: Matrix4) -> Matrix4 {
[
col_mat4_mul_col(a, b, 0),
col_mat4_mul_col(a, b, 1),
col_mat4_mul_col(a, b, 2),
col_mat4_mul_col(a, b, 3),
]
}
fn col_mat4_mul_col(a: Matrix4, b: Matrix4, i: usize) -> Vector4 {
[
vec4_dot(col_mat4_row(a, 0), b[i]),
vec4_dot(col_mat4_row(a, 1), b[i]),
vec4_dot(col_mat4_row(a, 2), b[i]),
vec4_dot(col_mat4_row(a, 3), b[i]),
]
}
fn vec4_dot(a: Vector4, b: Vector4) -> f32 {
a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
}
fn col_mat4_row(a: Matrix4, i: usize) -> Vector4
|
fn row_mat4_col(a: Matrix4, i: usize) -> Vector4 {
[a[0][i], a[1][i], a[2][i], a[3][i]]
}
|
{
row_mat4_col(a, i)
}
|
identifier_body
|
mvp.rs
|
// taken from vecmath & cam (MIT)
// https://github.com/PistonDevelopers/vecmath
// https://github.com/PistonDevelopers/cam
pub type Vector4 = [f32; 4];
pub type Matrix4 = [[f32; 4]; 4];
pub fn model_view_projection(model: Matrix4, view: Matrix4, projection: Matrix4) -> Matrix4 {
col_mat4_mul(col_mat4_mul(projection, view), model)
}
fn col_mat4_mul(a: Matrix4, b: Matrix4) -> Matrix4 {
[
col_mat4_mul_col(a, b, 0),
col_mat4_mul_col(a, b, 1),
col_mat4_mul_col(a, b, 2),
col_mat4_mul_col(a, b, 3),
]
}
fn
|
(a: Matrix4, b: Matrix4, i: usize) -> Vector4 {
[
vec4_dot(col_mat4_row(a, 0), b[i]),
vec4_dot(col_mat4_row(a, 1), b[i]),
vec4_dot(col_mat4_row(a, 2), b[i]),
vec4_dot(col_mat4_row(a, 3), b[i]),
]
}
fn vec4_dot(a: Vector4, b: Vector4) -> f32 {
a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
}
fn col_mat4_row(a: Matrix4, i: usize) -> Vector4 {
row_mat4_col(a, i)
}
fn row_mat4_col(a: Matrix4, i: usize) -> Vector4 {
[a[0][i], a[1][i], a[2][i], a[3][i]]
}
|
col_mat4_mul_col
|
identifier_name
|
mvp.rs
|
// taken from vecmath & cam (MIT)
// https://github.com/PistonDevelopers/vecmath
// https://github.com/PistonDevelopers/cam
pub type Vector4 = [f32; 4];
pub type Matrix4 = [[f32; 4]; 4];
pub fn model_view_projection(model: Matrix4, view: Matrix4, projection: Matrix4) -> Matrix4 {
col_mat4_mul(col_mat4_mul(projection, view), model)
}
fn col_mat4_mul(a: Matrix4, b: Matrix4) -> Matrix4 {
[
col_mat4_mul_col(a, b, 0),
col_mat4_mul_col(a, b, 1),
col_mat4_mul_col(a, b, 2),
col_mat4_mul_col(a, b, 3),
]
}
fn col_mat4_mul_col(a: Matrix4, b: Matrix4, i: usize) -> Vector4 {
[
vec4_dot(col_mat4_row(a, 0), b[i]),
vec4_dot(col_mat4_row(a, 1), b[i]),
vec4_dot(col_mat4_row(a, 2), b[i]),
vec4_dot(col_mat4_row(a, 3), b[i]),
|
fn vec4_dot(a: Vector4, b: Vector4) -> f32 {
a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
}
fn col_mat4_row(a: Matrix4, i: usize) -> Vector4 {
row_mat4_col(a, i)
}
fn row_mat4_col(a: Matrix4, i: usize) -> Vector4 {
[a[0][i], a[1][i], a[2][i], a[3][i]]
}
|
]
}
|
random_line_split
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, MutNullableJS, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableJS<HTMLCollection>,
}
|
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(&window, self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(local_name!("td"), None, &node.owner_doc()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
        if !parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
impl HTMLTableRowElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
|
random_line_split
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, MutNullableJS, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableJS<HTMLCollection>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement
|
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(&window, self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(local_name!("td"), None, &node.owner_doc()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
        if !parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
{
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
|
identifier_body
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, MutNullableJS, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableJS<HTMLCollection>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(&window, self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(local_name!("td"), None, &node.owner_doc()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
        if !parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else
|
;
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
{
return -1;
}
|
conditional_block
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, MutNullableJS, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use html5ever_atoms::LocalName;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableJS<HTMLCollection>,
}
impl HTMLTableRowElement {
fn
|
(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(&window, self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(local_name!("td"), None, &node.owner_doc()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
if !parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
new_inherited
|
identifier_name
|
trace.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for tracing JS-managed values.
//!
//! The lifetime of DOM objects is managed by the SpiderMonkey Garbage
//! Collector. A rooted DOM object implementing the interface `Foo` is traced
//! as follows:
//!
//! 1. The GC calls `_trace` defined in `FooBinding` during the marking
//! phase. (This happens through `JSClass.trace` for non-proxy bindings, and
//! through `ProxyTraps.trace` otherwise.)
//! 2. `_trace` calls `Foo::trace()` (an implementation of `JSTraceable`).
//! This is typically derived via a `#[dom_struct]` (implies `#[jstraceable]`) annotation.
//! Non-JS-managed types have an empty inline `trace()` method,
//! achieved via `no_jsmanaged_fields!` or similar.
//! 3. For all fields, `Foo::trace()`
//! calls `trace()` on the field.
//! For example, for fields of type `JS<T>`, `JS<T>::trace()` calls
//! `trace_reflector()`.
//! 4. `trace_reflector()` calls `trace_object()` with the `JSObject` for the
//! reflector.
//! 5. `trace_object()` calls `JS_CallTracer()` to notify the GC, which will
//! add the object to the graph, and will trace that object as well.
//! 6. When the GC finishes tracing, it [`finalizes`](../index.html#destruction)
//! any reflectors that were not reachable.
//!
//! The `no_jsmanaged_fields!()` macro adds an empty implementation of `JSTraceable` to
//! a datatype.
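//!
//! As a rough, illustrative sketch (the `ExampleNode` type and its fields are
//! hypothetical, not part of this module), a derived implementation for a struct
//! holding a `JS<T>` field and a plain `Cell` field behaves like:
//!
//! ```ignore
//! impl JSTraceable for ExampleNode {
//!     fn trace(&self, trc: *mut JSTracer) {
//!         self.parent.trace(trc); // JS<Node> field: forwards to trace_reflector()
//!         self.count.trace(trc);  // Cell<u32> field: holds no JS data, so a no-op
//!     }
//! }
//! ```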
use dom::bindings::js::JS;
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::{Reflectable, Reflector, WindowProxyHandler};
use script_task::ScriptChan;
use canvas_traits::{CanvasGradientStop, LinearGradientStyle, RadialGradientStyle};
use canvas_traits::{LineCapStyle, LineJoinStyle, CompositionOrBlending, RepetitionStyle};
use cssparser::RGBA;
use encoding::types::EncodingRef;
use geom::matrix2d::Matrix2D;
use geom::rect::Rect;
use geom::size::Size2D;
use html5ever::tree_builder::QuirksMode;
use hyper::header::Headers;
use hyper::method::Method;
use js::jsapi::{JSObject, JSTracer, JS_CallTracer, JSGCTraceKind};
use js::jsval::JSVal;
use js::rust::Runtime;
use layout_interface::{LayoutRPC, LayoutChan};
use libc;
use msg::constellation_msg::{PipelineId, SubpageId, WindowSizeData, WorkerId};
use net_traits::image_cache_task::{ImageCacheChan, ImageCacheTask};
use net_traits::storage_task::StorageType;
use script_traits::ScriptControlChan;
use script_traits::UntrustedNodeAddress;
use smallvec::SmallVec1;
use msg::compositor_msg::ScriptListener;
use msg::constellation_msg::ConstellationChan;
use net_traits::image::base::Image;
use util::str::{LengthOrPercentageOrAuto};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::collections::hash_state::HashState;
use std::ffi::CString;
use std::hash::{Hash, Hasher};
use std::intrinsics::return_address;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use string_cache::{Atom, Namespace};
use style::properties::PropertyDeclarationBlock;
use url::Url;
/// A trait to allow tracing (only) DOM objects.
pub trait JSTraceable {
/// Trace `self`.
fn trace(&self, trc: *mut JSTracer);
}
impl<T: Reflectable> JSTraceable for JS<T> {
fn trace(&self, trc: *mut JSTracer) {
trace_reflector(trc, "", self.reflector());
}
}
no_jsmanaged_fields!(EncodingRef);
no_jsmanaged_fields!(Reflector);
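// Illustrative sketch of what `no_jsmanaged_fields!` amounts to for a type that holds
// no JS-managed data (the `PlainData` type below is a hypothetical example, and the
// real macro expansion may differ in detail): an empty, inlined trace impl.
struct PlainData;
impl JSTraceable for PlainData {
    #[inline]
    fn trace(&self, _: *mut JSTracer) {
        // Nothing to mark: `PlainData` owns no reflectors or JSVals.
    }
}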
/// Trace a `JSVal`.
pub fn trace_jsval(tracer: *mut JSTracer, description: &str, val: JSVal) {
if !val.is_markable() {
return;
}
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing value {}", description);
JS_CallTracer(tracer, val.to_gcthing(), val.trace_kind());
}
}
/// Trace the `JSObject` held by `reflector`.
#[allow(unrooted_must_root)]
pub fn trace_reflector(tracer: *mut JSTracer, description: &str, reflector: &Reflector) {
|
pub fn trace_object(tracer: *mut JSTracer, description: &str, obj: *mut JSObject) {
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing {}", description);
JS_CallTracer(tracer, obj as *mut libc::c_void, JSGCTraceKind::JSTRACE_OBJECT);
}
}
impl<T: JSTraceable> JSTraceable for RefCell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.borrow().trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Rc<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Box<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for *const T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable> JSTraceable for *mut T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable+Copy> JSTraceable for Cell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.get().trace(trc)
}
}
impl JSTraceable for *mut JSObject {
fn trace(&self, trc: *mut JSTracer) {
trace_object(trc, "object", *self);
}
}
impl JSTraceable for JSVal {
fn trace(&self, trc: *mut JSTracer) {
trace_jsval(trc, "val", *self);
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable> JSTraceable for Vec<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable +'static> JSTraceable for SmallVec1<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
impl<T: JSTraceable> JSTraceable for Option<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
self.as_ref().map(|e| e.trace(trc));
}
}
impl<T: JSTraceable, U: JSTraceable> JSTraceable for Result<T, U> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
match *self {
Ok(ref inner) => inner.trace(trc),
Err(ref inner) => inner.trace(trc),
}
}
}
impl<K,V,S> JSTraceable for HashMap<K, V, S>
where K: Hash + Eq + JSTraceable,
V: JSTraceable,
S: HashState,
<S as HashState>::Hasher: Hasher,
{
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for (k, v) in self.iter() {
k.trace(trc);
v.trace(trc);
}
}
}
impl<A: JSTraceable, B: JSTraceable> JSTraceable for (A, B) {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
let (ref a, ref b) = *self;
a.trace(trc);
b.trace(trc);
}
}
no_jsmanaged_fields!(bool, f32, f64, String, Url);
no_jsmanaged_fields!(usize, u8, u16, u32, u64);
no_jsmanaged_fields!(isize, i8, i16, i32, i64);
no_jsmanaged_fields!(Sender<T>);
no_jsmanaged_fields!(Receiver<T>);
no_jsmanaged_fields!(Rect<T>);
no_jsmanaged_fields!(Size2D<T>);
no_jsmanaged_fields!(Arc<T>);
no_jsmanaged_fields!(Image, ImageCacheChan, ImageCacheTask, ScriptControlChan);
no_jsmanaged_fields!(Atom, Namespace);
no_jsmanaged_fields!(Trusted<T>);
no_jsmanaged_fields!(PropertyDeclarationBlock);
no_jsmanaged_fields!(HashSet<T>);
// These three are interdependent, if you plan to put jsmanaged data
// in one of these make sure it is propagated properly to containing structs
no_jsmanaged_fields!(SubpageId, WindowSizeData, PipelineId);
no_jsmanaged_fields!(WorkerId);
no_jsmanaged_fields!(QuirksMode);
no_jsmanaged_fields!(Runtime);
no_jsmanaged_fields!(Headers, Method);
no_jsmanaged_fields!(ConstellationChan);
no_jsmanaged_fields!(LayoutChan);
no_jsmanaged_fields!(WindowProxyHandler);
no_jsmanaged_fields!(UntrustedNodeAddress);
no_jsmanaged_fields!(LengthOrPercentageOrAuto);
no_jsmanaged_fields!(RGBA);
no_jsmanaged_fields!(Matrix2D<T>);
no_jsmanaged_fields!(StorageType);
no_jsmanaged_fields!(CanvasGradientStop, LinearGradientStyle, RadialGradientStyle);
no_jsmanaged_fields!(LineCapStyle, LineJoinStyle, CompositionOrBlending);
no_jsmanaged_fields!(RepetitionStyle);
impl JSTraceable for Box<ScriptChan+Send> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<Fn(f64, )> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl<'a> JSTraceable for &'a str {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl<A,B> JSTraceable for fn(A) -> B {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<ScriptListener+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<LayoutRPC+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for () {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
}
}
/// Holds a set of vectors that need to be rooted
pub struct RootedCollectionSet {
set: Vec<HashSet<*const RootedVec<Void>>>
}
/// TLV Holds a set of vectors that need to be rooted
thread_local!(pub static ROOTED_COLLECTIONS: Rc<RefCell<RootedCollectionSet>> =
Rc::new(RefCell::new(RootedCollectionSet::new())));
/// Type of `RootedVec`
pub enum CollectionType {
/// DOM objects
DOMObjects,
/// `JSVal`s
JSVals,
/// `*mut JSObject`s
JSObjects,
}
impl RootedCollectionSet {
fn new() -> RootedCollectionSet {
RootedCollectionSet {
set: vec!(HashSet::new(), HashSet::new(), HashSet::new())
}
}
fn remove<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
assert!(collections.set[type_ as usize].remove(&(collection as *const _ as *const _)));
});
}
fn add<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
collections.set[type_ as usize].insert(collection as *const _ as *const _);
})
}
unsafe fn trace(&self, tracer: *mut JSTracer) {
fn trace_collection_type<T>(tracer: *mut JSTracer,
collections: &HashSet<*const RootedVec<Void>>)
where T: JSTraceable + VecRootableType
{
for collection in collections {
let collection: *const RootedVec<Void> = *collection;
let collection = collection as *const RootedVec<T>;
unsafe {
let _ = (*collection).trace(tracer);
}
}
}
let dom_collections =
&self.set[CollectionType::DOMObjects as usize] as *const _ as *const HashSet<*const RootedVec<JS<Void>>>;
for dom_collection in (*dom_collections).iter() {
for reflector in (**dom_collection).iter() {
trace_reflector(tracer, "", reflector.reflector());
}
}
trace_collection_type::<JSVal>(tracer, &self.set[CollectionType::JSVals as usize]);
trace_collection_type::<*mut JSObject>(tracer, &self.set[CollectionType::JSObjects as usize]);
}
}
/// Trait implemented by all types that can be used with RootedVec
pub trait VecRootableType {
/// Return the type tag used to determine how to trace RootedVec
fn tag(_a: Option<Self>) -> CollectionType;
}
impl<T: Reflectable> VecRootableType for JS<T> {
fn tag(_a: Option<JS<T>>) -> CollectionType { CollectionType::DOMObjects }
}
impl VecRootableType for JSVal {
fn tag(_a: Option<JSVal>) -> CollectionType { CollectionType::JSVals }
}
impl VecRootableType for *mut JSObject {
fn tag(_a: Option<*mut JSObject>) -> CollectionType { CollectionType::JSObjects }
}
enum Void {}
impl VecRootableType for Void {
fn tag(_a: Option<Void>) -> CollectionType { unreachable!() }
}
impl Reflectable for Void {
fn reflector<'a>(&'a self) -> &'a Reflector { unreachable!() }
}
/// A vector of items that are rooted for the lifetime
/// of this struct
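///
/// A minimal usage sketch (illustrative only): because `RootedVec` derefs to
/// `Vec<T>`, the usual vector methods are available while the collection stays
/// registered with `ROOTED_COLLECTIONS` for tracing.
///
/// ```ignore
/// let mut vals: RootedVec<JSVal> = RootedVec::new();
/// assert!(vals.is_empty());
/// // contents remain rooted until `vals` is dropped
/// ```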
#[allow(unrooted_must_root)]
#[no_move]
pub struct RootedVec<T: VecRootableType> {
v: Vec<T>
}
impl<T: VecRootableType> RootedVec<T> {
/// Create a vector of items of type T that is rooted for
/// the lifetime of this struct
pub fn new() -> RootedVec<T> {
let addr = unsafe {
return_address() as *const libc::c_void
};
RootedVec::new_with_destination_address(addr)
}
/// Create a vector of items of type T. This constructor is specific
/// for RootCollection.
pub fn new_with_destination_address(addr: *const libc::c_void) -> RootedVec<T> {
unsafe {
RootedCollectionSet::add::<T>(&*(addr as *const _));
}
RootedVec::<T> { v: vec!() }
}
}
impl<T: VecRootableType> Drop for RootedVec<T> {
fn drop(&mut self) {
RootedCollectionSet::remove(self);
}
}
impl<T: VecRootableType> Deref for RootedVec<T> {
type Target = Vec<T>;
fn deref(&self) -> &Vec<T> {
&self.v
}
}
impl<T: VecRootableType> DerefMut for RootedVec<T> {
fn deref_mut(&mut self) -> &mut Vec<T> {
&mut self.v
}
}
/// SM Callback that traces the rooted collections
pub unsafe fn trace_collections(tracer: *mut JSTracer) {
ROOTED_COLLECTIONS.with(|ref collections| {
let collections = collections.borrow();
collections.trace(tracer);
});
}
|
trace_object(tracer, description, reflector.get_jsobject())
}
/// Trace a `JSObject`.
|
random_line_split
|
trace.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for tracing JS-managed values.
//!
//! The lifetime of DOM objects is managed by the SpiderMonkey Garbage
//! Collector. A rooted DOM object implementing the interface `Foo` is traced
//! as follows:
//!
//! 1. The GC calls `_trace` defined in `FooBinding` during the marking
//! phase. (This happens through `JSClass.trace` for non-proxy bindings, and
//! through `ProxyTraps.trace` otherwise.)
//! 2. `_trace` calls `Foo::trace()` (an implementation of `JSTraceable`).
//! This is typically derived via a `#[dom_struct]` (implies `#[jstraceable]`) annotation.
//! Non-JS-managed types have an empty inline `trace()` method,
//! achieved via `no_jsmanaged_fields!` or similar.
//! 3. For all fields, `Foo::trace()`
//! calls `trace()` on the field.
//! For example, for fields of type `JS<T>`, `JS<T>::trace()` calls
//! `trace_reflector()`.
//! 4. `trace_reflector()` calls `trace_object()` with the `JSObject` for the
//! reflector.
//! 5. `trace_object()` calls `JS_CallTracer()` to notify the GC, which will
//! add the object to the graph, and will trace that object as well.
//! 6. When the GC finishes tracing, it [`finalizes`](../index.html#destruction)
//! any reflectors that were not reachable.
//!
//! The `no_jsmanaged_fields!()` macro adds an empty implementation of `JSTraceable` to
//! a datatype.
use dom::bindings::js::JS;
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::{Reflectable, Reflector, WindowProxyHandler};
use script_task::ScriptChan;
use canvas_traits::{CanvasGradientStop, LinearGradientStyle, RadialGradientStyle};
use canvas_traits::{LineCapStyle, LineJoinStyle, CompositionOrBlending, RepetitionStyle};
use cssparser::RGBA;
use encoding::types::EncodingRef;
use geom::matrix2d::Matrix2D;
use geom::rect::Rect;
use geom::size::Size2D;
use html5ever::tree_builder::QuirksMode;
use hyper::header::Headers;
use hyper::method::Method;
use js::jsapi::{JSObject, JSTracer, JS_CallTracer, JSGCTraceKind};
use js::jsval::JSVal;
use js::rust::Runtime;
use layout_interface::{LayoutRPC, LayoutChan};
use libc;
use msg::constellation_msg::{PipelineId, SubpageId, WindowSizeData, WorkerId};
use net_traits::image_cache_task::{ImageCacheChan, ImageCacheTask};
use net_traits::storage_task::StorageType;
use script_traits::ScriptControlChan;
use script_traits::UntrustedNodeAddress;
use smallvec::SmallVec1;
use msg::compositor_msg::ScriptListener;
use msg::constellation_msg::ConstellationChan;
use net_traits::image::base::Image;
use util::str::{LengthOrPercentageOrAuto};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::collections::hash_state::HashState;
use std::ffi::CString;
use std::hash::{Hash, Hasher};
use std::intrinsics::return_address;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use string_cache::{Atom, Namespace};
use style::properties::PropertyDeclarationBlock;
use url::Url;
/// A trait to allow tracing (only) DOM objects.
pub trait JSTraceable {
/// Trace `self`.
fn trace(&self, trc: *mut JSTracer);
}
impl<T: Reflectable> JSTraceable for JS<T> {
fn trace(&self, trc: *mut JSTracer) {
trace_reflector(trc, "", self.reflector());
}
}
no_jsmanaged_fields!(EncodingRef);
no_jsmanaged_fields!(Reflector);
/// Trace a `JSVal`.
pub fn trace_jsval(tracer: *mut JSTracer, description: &str, val: JSVal) {
if !val.is_markable() {
return;
}
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing value {}", description);
JS_CallTracer(tracer, val.to_gcthing(), val.trace_kind());
}
}
/// Trace the `JSObject` held by `reflector`.
#[allow(unrooted_must_root)]
pub fn trace_reflector(tracer: *mut JSTracer, description: &str, reflector: &Reflector) {
trace_object(tracer, description, reflector.get_jsobject())
}
/// Trace a `JSObject`.
pub fn trace_object(tracer: *mut JSTracer, description: &str, obj: *mut JSObject) {
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing {}", description);
JS_CallTracer(tracer, obj as *mut libc::c_void, JSGCTraceKind::JSTRACE_OBJECT);
}
}
impl<T: JSTraceable> JSTraceable for RefCell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.borrow().trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Rc<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Box<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for *const T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable> JSTraceable for *mut T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable+Copy> JSTraceable for Cell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.get().trace(trc)
}
}
impl JSTraceable for *mut JSObject {
fn trace(&self, trc: *mut JSTracer) {
trace_object(trc, "object", *self);
}
}
impl JSTraceable for JSVal {
fn trace(&self, trc: *mut JSTracer) {
trace_jsval(trc, "val", *self);
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable> JSTraceable for Vec<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable +'static> JSTraceable for SmallVec1<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
impl<T: JSTraceable> JSTraceable for Option<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
self.as_ref().map(|e| e.trace(trc));
}
}
impl<T: JSTraceable, U: JSTraceable> JSTraceable for Result<T, U> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
match *self {
Ok(ref inner) => inner.trace(trc),
Err(ref inner) => inner.trace(trc),
}
}
}
impl<K,V,S> JSTraceable for HashMap<K, V, S>
where K: Hash + Eq + JSTraceable,
V: JSTraceable,
S: HashState,
<S as HashState>::Hasher: Hasher,
{
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for (k, v) in self.iter() {
k.trace(trc);
v.trace(trc);
}
}
}
impl<A: JSTraceable, B: JSTraceable> JSTraceable for (A, B) {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
let (ref a, ref b) = *self;
a.trace(trc);
b.trace(trc);
}
}
no_jsmanaged_fields!(bool, f32, f64, String, Url);
no_jsmanaged_fields!(usize, u8, u16, u32, u64);
no_jsmanaged_fields!(isize, i8, i16, i32, i64);
no_jsmanaged_fields!(Sender<T>);
no_jsmanaged_fields!(Receiver<T>);
no_jsmanaged_fields!(Rect<T>);
no_jsmanaged_fields!(Size2D<T>);
no_jsmanaged_fields!(Arc<T>);
no_jsmanaged_fields!(Image, ImageCacheChan, ImageCacheTask, ScriptControlChan);
no_jsmanaged_fields!(Atom, Namespace);
no_jsmanaged_fields!(Trusted<T>);
no_jsmanaged_fields!(PropertyDeclarationBlock);
no_jsmanaged_fields!(HashSet<T>);
// These three are interdependent, if you plan to put jsmanaged data
// in one of these make sure it is propagated properly to containing structs
no_jsmanaged_fields!(SubpageId, WindowSizeData, PipelineId);
no_jsmanaged_fields!(WorkerId);
no_jsmanaged_fields!(QuirksMode);
no_jsmanaged_fields!(Runtime);
no_jsmanaged_fields!(Headers, Method);
no_jsmanaged_fields!(ConstellationChan);
no_jsmanaged_fields!(LayoutChan);
no_jsmanaged_fields!(WindowProxyHandler);
no_jsmanaged_fields!(UntrustedNodeAddress);
no_jsmanaged_fields!(LengthOrPercentageOrAuto);
no_jsmanaged_fields!(RGBA);
no_jsmanaged_fields!(Matrix2D<T>);
no_jsmanaged_fields!(StorageType);
no_jsmanaged_fields!(CanvasGradientStop, LinearGradientStyle, RadialGradientStyle);
no_jsmanaged_fields!(LineCapStyle, LineJoinStyle, CompositionOrBlending);
no_jsmanaged_fields!(RepetitionStyle);
impl JSTraceable for Box<ScriptChan+Send> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<Fn(f64, )> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl<'a> JSTraceable for &'a str {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl<A,B> JSTraceable for fn(A) -> B {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<ScriptListener+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<LayoutRPC+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for () {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
}
}
/// Holds a set of vectors that need to be rooted
pub struct RootedCollectionSet {
set: Vec<HashSet<*const RootedVec<Void>>>
}
/// TLV Holds a set of vectors that need to be rooted
thread_local!(pub static ROOTED_COLLECTIONS: Rc<RefCell<RootedCollectionSet>> =
Rc::new(RefCell::new(RootedCollectionSet::new())));
/// Type of `RootedVec`
pub enum CollectionType {
/// DOM objects
DOMObjects,
/// `JSVal`s
JSVals,
/// `*mut JSObject`s
JSObjects,
}
impl RootedCollectionSet {
fn new() -> RootedCollectionSet {
RootedCollectionSet {
set: vec!(HashSet::new(), HashSet::new(), HashSet::new())
}
}
fn remove<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
assert!(collections.set[type_ as usize].remove(&(collection as *const _ as *const _)));
});
}
fn add<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
collections.set[type_ as usize].insert(collection as *const _ as *const _);
})
}
unsafe fn trace(&self, tracer: *mut JSTracer) {
fn trace_collection_type<T>(tracer: *mut JSTracer,
collections: &HashSet<*const RootedVec<Void>>)
where T: JSTraceable + VecRootableType
{
for collection in collections {
let collection: *const RootedVec<Void> = *collection;
let collection = collection as *const RootedVec<T>;
unsafe {
let _ = (*collection).trace(tracer);
}
}
}
let dom_collections =
&self.set[CollectionType::DOMObjects as usize] as *const _ as *const HashSet<*const RootedVec<JS<Void>>>;
for dom_collection in (*dom_collections).iter() {
for reflector in (**dom_collection).iter() {
trace_reflector(tracer, "", reflector.reflector());
}
}
trace_collection_type::<JSVal>(tracer, &self.set[CollectionType::JSVals as usize]);
trace_collection_type::<*mut JSObject>(tracer, &self.set[CollectionType::JSObjects as usize]);
}
}
/// Trait implemented by all types that can be used with RootedVec
pub trait VecRootableType {
/// Return the type tag used to determine how to trace RootedVec
fn tag(_a: Option<Self>) -> CollectionType;
}
impl<T: Reflectable> VecRootableType for JS<T> {
fn tag(_a: Option<JS<T>>) -> CollectionType { CollectionType::DOMObjects }
}
impl VecRootableType for JSVal {
fn tag(_a: Option<JSVal>) -> CollectionType { CollectionType::JSVals }
}
impl VecRootableType for *mut JSObject {
fn tag(_a: Option<*mut JSObject>) -> CollectionType
|
}
enum Void {}
impl VecRootableType for Void {
fn tag(_a: Option<Void>) -> CollectionType { unreachable!() }
}
impl Reflectable for Void {
fn reflector<'a>(&'a self) -> &'a Reflector { unreachable!() }
}
/// A vector of items that are rooted for the lifetime
/// of this struct
#[allow(unrooted_must_root)]
#[no_move]
pub struct RootedVec<T: VecRootableType> {
v: Vec<T>
}
impl<T: VecRootableType> RootedVec<T> {
/// Create a vector of items of type T that is rooted for
/// the lifetime of this struct
pub fn new() -> RootedVec<T> {
let addr = unsafe {
return_address() as *const libc::c_void
};
RootedVec::new_with_destination_address(addr)
}
/// Create a vector of items of type T. This constructor is specific
/// for RootCollection.
pub fn new_with_destination_address(addr: *const libc::c_void) -> RootedVec<T> {
unsafe {
RootedCollectionSet::add::<T>(&*(addr as *const _));
}
RootedVec::<T> { v: vec!() }
}
}
impl<T: VecRootableType> Drop for RootedVec<T> {
fn drop(&mut self) {
RootedCollectionSet::remove(self);
}
}
impl<T: VecRootableType> Deref for RootedVec<T> {
type Target = Vec<T>;
fn deref(&self) -> &Vec<T> {
&self.v
}
}
impl<T: VecRootableType> DerefMut for RootedVec<T> {
fn deref_mut(&mut self) -> &mut Vec<T> {
&mut self.v
}
}
/// SM Callback that traces the rooted collections
pub unsafe fn trace_collections(tracer: *mut JSTracer) {
ROOTED_COLLECTIONS.with(|ref collections| {
let collections = collections.borrow();
collections.trace(tracer);
});
}
|
{ CollectionType::JSObjects }
|
identifier_body
|
trace.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for tracing JS-managed values.
//!
//! The lifetime of DOM objects is managed by the SpiderMonkey Garbage
//! Collector. A rooted DOM object implementing the interface `Foo` is traced
//! as follows:
//!
//! 1. The GC calls `_trace` defined in `FooBinding` during the marking
//! phase. (This happens through `JSClass.trace` for non-proxy bindings, and
//! through `ProxyTraps.trace` otherwise.)
//! 2. `_trace` calls `Foo::trace()` (an implementation of `JSTraceable`).
//! This is typically derived via a `#[dom_struct]` (implies `#[jstraceable]`) annotation.
//! Non-JS-managed types have an empty inline `trace()` method,
//! achieved via `no_jsmanaged_fields!` or similar.
//! 3. For all fields, `Foo::trace()`
//! calls `trace()` on the field.
//! For example, for fields of type `JS<T>`, `JS<T>::trace()` calls
//! `trace_reflector()`.
//! 4. `trace_reflector()` calls `trace_object()` with the `JSObject` for the
//! reflector.
//! 5. `trace_object()` calls `JS_CallTracer()` to notify the GC, which will
//! add the object to the graph, and will trace that object as well.
//! 6. When the GC finishes tracing, it [`finalizes`](../index.html#destruction)
//! any reflectors that were not reachable.
//!
//! The `no_jsmanaged_fields!()` macro adds an empty implementation of `JSTraceable` to
//! a datatype.
use dom::bindings::js::JS;
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::{Reflectable, Reflector, WindowProxyHandler};
use script_task::ScriptChan;
use canvas_traits::{CanvasGradientStop, LinearGradientStyle, RadialGradientStyle};
use canvas_traits::{LineCapStyle, LineJoinStyle, CompositionOrBlending, RepetitionStyle};
use cssparser::RGBA;
use encoding::types::EncodingRef;
use geom::matrix2d::Matrix2D;
use geom::rect::Rect;
use geom::size::Size2D;
use html5ever::tree_builder::QuirksMode;
use hyper::header::Headers;
use hyper::method::Method;
use js::jsapi::{JSObject, JSTracer, JS_CallTracer, JSGCTraceKind};
use js::jsval::JSVal;
use js::rust::Runtime;
use layout_interface::{LayoutRPC, LayoutChan};
use libc;
use msg::constellation_msg::{PipelineId, SubpageId, WindowSizeData, WorkerId};
use net_traits::image_cache_task::{ImageCacheChan, ImageCacheTask};
use net_traits::storage_task::StorageType;
use script_traits::ScriptControlChan;
use script_traits::UntrustedNodeAddress;
use smallvec::SmallVec1;
use msg::compositor_msg::ScriptListener;
use msg::constellation_msg::ConstellationChan;
use net_traits::image::base::Image;
use util::str::{LengthOrPercentageOrAuto};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::collections::hash_state::HashState;
use std::ffi::CString;
use std::hash::{Hash, Hasher};
use std::intrinsics::return_address;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use string_cache::{Atom, Namespace};
use style::properties::PropertyDeclarationBlock;
use url::Url;
/// A trait to allow tracing (only) DOM objects.
pub trait JSTraceable {
/// Trace `self`.
fn trace(&self, trc: *mut JSTracer);
}
impl<T: Reflectable> JSTraceable for JS<T> {
fn trace(&self, trc: *mut JSTracer) {
trace_reflector(trc, "", self.reflector());
}
}
no_jsmanaged_fields!(EncodingRef);
no_jsmanaged_fields!(Reflector);
/// Trace a `JSVal`.
pub fn trace_jsval(tracer: *mut JSTracer, description: &str, val: JSVal) {
if !val.is_markable() {
return;
}
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing value {}", description);
JS_CallTracer(tracer, val.to_gcthing(), val.trace_kind());
}
}
/// Trace the `JSObject` held by `reflector`.
#[allow(unrooted_must_root)]
pub fn trace_reflector(tracer: *mut JSTracer, description: &str, reflector: &Reflector) {
trace_object(tracer, description, reflector.get_jsobject())
}
/// Trace a `JSObject`.
pub fn trace_object(tracer: *mut JSTracer, description: &str, obj: *mut JSObject) {
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing {}", description);
JS_CallTracer(tracer, obj as *mut libc::c_void, JSGCTraceKind::JSTRACE_OBJECT);
}
}
impl<T: JSTraceable> JSTraceable for RefCell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.borrow().trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Rc<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Box<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for *const T {
fn
|
(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable> JSTraceable for *mut T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable+Copy> JSTraceable for Cell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.get().trace(trc)
}
}
impl JSTraceable for *mut JSObject {
fn trace(&self, trc: *mut JSTracer) {
trace_object(trc, "object", *self);
}
}
impl JSTraceable for JSVal {
fn trace(&self, trc: *mut JSTracer) {
trace_jsval(trc, "val", *self);
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable> JSTraceable for Vec<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable +'static> JSTraceable for SmallVec1<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
impl<T: JSTraceable> JSTraceable for Option<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
self.as_ref().map(|e| e.trace(trc));
}
}
impl<T: JSTraceable, U: JSTraceable> JSTraceable for Result<T, U> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
match *self {
Ok(ref inner) => inner.trace(trc),
Err(ref inner) => inner.trace(trc),
}
}
}
impl<K,V,S> JSTraceable for HashMap<K, V, S>
where K: Hash + Eq + JSTraceable,
V: JSTraceable,
S: HashState,
<S as HashState>::Hasher: Hasher,
{
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for (k, v) in self.iter() {
k.trace(trc);
v.trace(trc);
}
}
}
impl<A: JSTraceable, B: JSTraceable> JSTraceable for (A, B) {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
let (ref a, ref b) = *self;
a.trace(trc);
b.trace(trc);
}
}
no_jsmanaged_fields!(bool, f32, f64, String, Url);
no_jsmanaged_fields!(usize, u8, u16, u32, u64);
no_jsmanaged_fields!(isize, i8, i16, i32, i64);
no_jsmanaged_fields!(Sender<T>);
no_jsmanaged_fields!(Receiver<T>);
no_jsmanaged_fields!(Rect<T>);
no_jsmanaged_fields!(Size2D<T>);
no_jsmanaged_fields!(Arc<T>);
no_jsmanaged_fields!(Image, ImageCacheChan, ImageCacheTask, ScriptControlChan);
no_jsmanaged_fields!(Atom, Namespace);
no_jsmanaged_fields!(Trusted<T>);
no_jsmanaged_fields!(PropertyDeclarationBlock);
no_jsmanaged_fields!(HashSet<T>);
// These three are interdependent, if you plan to put jsmanaged data
// in one of these make sure it is propagated properly to containing structs
no_jsmanaged_fields!(SubpageId, WindowSizeData, PipelineId);
no_jsmanaged_fields!(WorkerId);
no_jsmanaged_fields!(QuirksMode);
no_jsmanaged_fields!(Runtime);
no_jsmanaged_fields!(Headers, Method);
no_jsmanaged_fields!(ConstellationChan);
no_jsmanaged_fields!(LayoutChan);
no_jsmanaged_fields!(WindowProxyHandler);
no_jsmanaged_fields!(UntrustedNodeAddress);
no_jsmanaged_fields!(LengthOrPercentageOrAuto);
no_jsmanaged_fields!(RGBA);
no_jsmanaged_fields!(Matrix2D<T>);
no_jsmanaged_fields!(StorageType);
no_jsmanaged_fields!(CanvasGradientStop, LinearGradientStyle, RadialGradientStyle);
no_jsmanaged_fields!(LineCapStyle, LineJoinStyle, CompositionOrBlending);
no_jsmanaged_fields!(RepetitionStyle);
impl JSTraceable for Box<ScriptChan+Send> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<Fn(f64, )> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl<'a> JSTraceable for &'a str {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl<A,B> JSTraceable for fn(A) -> B {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<ScriptListener+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<LayoutRPC+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for () {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
}
}
/// Holds a set of vectors that need to be rooted
pub struct RootedCollectionSet {
set: Vec<HashSet<*const RootedVec<Void>>>
}
/// TLV Holds a set of vectors that need to be rooted
thread_local!(pub static ROOTED_COLLECTIONS: Rc<RefCell<RootedCollectionSet>> =
Rc::new(RefCell::new(RootedCollectionSet::new())));
/// Type of `RootedVec`
pub enum CollectionType {
/// DOM objects
DOMObjects,
/// `JSVal`s
JSVals,
/// `*mut JSObject`s
JSObjects,
}
impl RootedCollectionSet {
fn new() -> RootedCollectionSet {
RootedCollectionSet {
set: vec!(HashSet::new(), HashSet::new(), HashSet::new())
}
}
fn remove<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
assert!(collections.set[type_ as usize].remove(&(collection as *const _ as *const _)));
});
}
fn add<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
collections.set[type_ as usize].insert(collection as *const _ as *const _);
})
}
unsafe fn trace(&self, tracer: *mut JSTracer) {
fn trace_collection_type<T>(tracer: *mut JSTracer,
collections: &HashSet<*const RootedVec<Void>>)
where T: JSTraceable + VecRootableType
{
for collection in collections {
let collection: *const RootedVec<Void> = *collection;
let collection = collection as *const RootedVec<T>;
unsafe {
let _ = (*collection).trace(tracer);
}
}
}
let dom_collections =
&self.set[CollectionType::DOMObjects as usize] as *const _ as *const HashSet<*const RootedVec<JS<Void>>>;
for dom_collection in (*dom_collections).iter() {
for reflector in (**dom_collection).iter() {
trace_reflector(tracer, "", reflector.reflector());
}
}
trace_collection_type::<JSVal>(tracer, &self.set[CollectionType::JSVals as usize]);
trace_collection_type::<*mut JSObject>(tracer, &self.set[CollectionType::JSObjects as usize]);
}
}
/// Trait implemented by all types that can be used with RootedVec
pub trait VecRootableType {
/// Return the type tag used to determine how to trace RootedVec
fn tag(_a: Option<Self>) -> CollectionType;
}
impl<T: Reflectable> VecRootableType for JS<T> {
fn tag(_a: Option<JS<T>>) -> CollectionType { CollectionType::DOMObjects }
}
impl VecRootableType for JSVal {
fn tag(_a: Option<JSVal>) -> CollectionType { CollectionType::JSVals }
}
impl VecRootableType for *mut JSObject {
fn tag(_a: Option<*mut JSObject>) -> CollectionType { CollectionType::JSObjects }
}
enum Void {}
impl VecRootableType for Void {
fn tag(_a: Option<Void>) -> CollectionType { unreachable!() }
}
impl Reflectable for Void {
fn reflector<'a>(&'a self) -> &'a Reflector { unreachable!() }
}
/// A vector of items that are rooted for the lifetime
/// of this struct
#[allow(unrooted_must_root)]
#[no_move]
pub struct RootedVec<T: VecRootableType> {
v: Vec<T>
}
impl<T: VecRootableType> RootedVec<T> {
/// Create a vector of items of type T that is rooted for
/// the lifetime of this struct
pub fn new() -> RootedVec<T> {
let addr = unsafe {
return_address() as *const libc::c_void
};
RootedVec::new_with_destination_address(addr)
}
/// Create a vector of items of type T. This constructor is specific
/// for RootCollection.
pub fn new_with_destination_address(addr: *const libc::c_void) -> RootedVec<T> {
unsafe {
RootedCollectionSet::add::<T>(&*(addr as *const _));
}
RootedVec::<T> { v: vec!() }
}
}
impl<T: VecRootableType> Drop for RootedVec<T> {
fn drop(&mut self) {
RootedCollectionSet::remove(self);
}
}
impl<T: VecRootableType> Deref for RootedVec<T> {
type Target = Vec<T>;
fn deref(&self) -> &Vec<T> {
&self.v
}
}
impl<T: VecRootableType> DerefMut for RootedVec<T> {
fn deref_mut(&mut self) -> &mut Vec<T> {
&mut self.v
}
}
/// SM Callback that traces the rooted collections
pub unsafe fn trace_collections(tracer: *mut JSTracer) {
ROOTED_COLLECTIONS.with(|ref collections| {
let collections = collections.borrow();
collections.trace(tracer);
});
}
|
trace
|
identifier_name
|
trace.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for tracing JS-managed values.
//!
//! The lifetime of DOM objects is managed by the SpiderMonkey Garbage
//! Collector. A rooted DOM object implementing the interface `Foo` is traced
//! as follows:
//!
//! 1. The GC calls `_trace` defined in `FooBinding` during the marking
//! phase. (This happens through `JSClass.trace` for non-proxy bindings, and
//! through `ProxyTraps.trace` otherwise.)
//! 2. `_trace` calls `Foo::trace()` (an implementation of `JSTraceable`).
//! This is typically derived via a `#[dom_struct]` (implies `#[jstraceable]`) annotation.
//! Non-JS-managed types have an empty inline `trace()` method,
//! achieved via `no_jsmanaged_fields!` or similar.
//! 3. For all fields, `Foo::trace()`
//! calls `trace()` on the field.
//! For example, for fields of type `JS<T>`, `JS<T>::trace()` calls
//! `trace_reflector()`.
//! 4. `trace_reflector()` calls `trace_object()` with the `JSObject` for the
//! reflector.
//! 5. `trace_object()` calls `JS_CallTracer()` to notify the GC, which will
//! add the object to the graph, and will trace that object as well.
//! 6. When the GC finishes tracing, it [`finalizes`](../index.html#destruction)
//! any reflectors that were not reachable.
//!
//! The `no_jsmanaged_fields!()` macro adds an empty implementation of `JSTraceable` to
//! a datatype.
use dom::bindings::js::JS;
use dom::bindings::refcounted::Trusted;
use dom::bindings::utils::{Reflectable, Reflector, WindowProxyHandler};
use script_task::ScriptChan;
use canvas_traits::{CanvasGradientStop, LinearGradientStyle, RadialGradientStyle};
use canvas_traits::{LineCapStyle, LineJoinStyle, CompositionOrBlending, RepetitionStyle};
use cssparser::RGBA;
use encoding::types::EncodingRef;
use geom::matrix2d::Matrix2D;
use geom::rect::Rect;
use geom::size::Size2D;
use html5ever::tree_builder::QuirksMode;
use hyper::header::Headers;
use hyper::method::Method;
use js::jsapi::{JSObject, JSTracer, JS_CallTracer, JSGCTraceKind};
use js::jsval::JSVal;
use js::rust::Runtime;
use layout_interface::{LayoutRPC, LayoutChan};
use libc;
use msg::constellation_msg::{PipelineId, SubpageId, WindowSizeData, WorkerId};
use net_traits::image_cache_task::{ImageCacheChan, ImageCacheTask};
use net_traits::storage_task::StorageType;
use script_traits::ScriptControlChan;
use script_traits::UntrustedNodeAddress;
use smallvec::SmallVec1;
use msg::compositor_msg::ScriptListener;
use msg::constellation_msg::ConstellationChan;
use net_traits::image::base::Image;
use util::str::{LengthOrPercentageOrAuto};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::collections::hash_state::HashState;
use std::ffi::CString;
use std::hash::{Hash, Hasher};
use std::intrinsics::return_address;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use string_cache::{Atom, Namespace};
use style::properties::PropertyDeclarationBlock;
use url::Url;
/// A trait to allow tracing (only) DOM objects.
pub trait JSTraceable {
/// Trace `self`.
fn trace(&self, trc: *mut JSTracer);
}
impl<T: Reflectable> JSTraceable for JS<T> {
fn trace(&self, trc: *mut JSTracer) {
trace_reflector(trc, "", self.reflector());
}
}
no_jsmanaged_fields!(EncodingRef);
no_jsmanaged_fields!(Reflector);
/// Trace a `JSVal`.
pub fn trace_jsval(tracer: *mut JSTracer, description: &str, val: JSVal) {
if !val.is_markable()
|
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing value {}", description);
JS_CallTracer(tracer, val.to_gcthing(), val.trace_kind());
}
}
/// Trace the `JSObject` held by `reflector`.
#[allow(unrooted_must_root)]
pub fn trace_reflector(tracer: *mut JSTracer, description: &str, reflector: &Reflector) {
trace_object(tracer, description, reflector.get_jsobject())
}
/// Trace a `JSObject`.
pub fn trace_object(tracer: *mut JSTracer, description: &str, obj: *mut JSObject) {
unsafe {
let name = CString::new(description).unwrap();
(*tracer).debugPrinter = None;
(*tracer).debugPrintIndex = !0;
(*tracer).debugPrintArg = name.as_ptr() as *const libc::c_void;
debug!("tracing {}", description);
JS_CallTracer(tracer, obj as *mut libc::c_void, JSGCTraceKind::JSTRACE_OBJECT);
}
}
impl<T: JSTraceable> JSTraceable for RefCell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.borrow().trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Rc<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for Box<T> {
fn trace(&self, trc: *mut JSTracer) {
(**self).trace(trc)
}
}
impl<T: JSTraceable> JSTraceable for *const T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable> JSTraceable for *mut T {
fn trace(&self, trc: *mut JSTracer) {
if !self.is_null() {
unsafe {
(**self).trace(trc)
}
}
}
}
impl<T: JSTraceable+Copy> JSTraceable for Cell<T> {
fn trace(&self, trc: *mut JSTracer) {
self.get().trace(trc)
}
}
impl JSTraceable for *mut JSObject {
fn trace(&self, trc: *mut JSTracer) {
trace_object(trc, "object", *self);
}
}
impl JSTraceable for JSVal {
fn trace(&self, trc: *mut JSTracer) {
trace_jsval(trc, "val", *self);
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable> JSTraceable for Vec<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
// XXXManishearth Check if the following three are optimized to no-ops
// if e.trace() is a no-op (e.g. it is a no_jsmanaged_fields type)
impl<T: JSTraceable +'static> JSTraceable for SmallVec1<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for e in self.iter() {
e.trace(trc);
}
}
}
impl<T: JSTraceable> JSTraceable for Option<T> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
self.as_ref().map(|e| e.trace(trc));
}
}
impl<T: JSTraceable, U: JSTraceable> JSTraceable for Result<T, U> {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
match *self {
Ok(ref inner) => inner.trace(trc),
Err(ref inner) => inner.trace(trc),
}
}
}
impl<K,V,S> JSTraceable for HashMap<K, V, S>
where K: Hash + Eq + JSTraceable,
V: JSTraceable,
S: HashState,
<S as HashState>::Hasher: Hasher,
{
#[inline]
fn trace(&self, trc: *mut JSTracer) {
for (k, v) in self.iter() {
k.trace(trc);
v.trace(trc);
}
}
}
impl<A: JSTraceable, B: JSTraceable> JSTraceable for (A, B) {
#[inline]
fn trace(&self, trc: *mut JSTracer) {
let (ref a, ref b) = *self;
a.trace(trc);
b.trace(trc);
}
}
no_jsmanaged_fields!(bool, f32, f64, String, Url);
no_jsmanaged_fields!(usize, u8, u16, u32, u64);
no_jsmanaged_fields!(isize, i8, i16, i32, i64);
no_jsmanaged_fields!(Sender<T>);
no_jsmanaged_fields!(Receiver<T>);
no_jsmanaged_fields!(Rect<T>);
no_jsmanaged_fields!(Size2D<T>);
no_jsmanaged_fields!(Arc<T>);
no_jsmanaged_fields!(Image, ImageCacheChan, ImageCacheTask, ScriptControlChan);
no_jsmanaged_fields!(Atom, Namespace);
no_jsmanaged_fields!(Trusted<T>);
no_jsmanaged_fields!(PropertyDeclarationBlock);
no_jsmanaged_fields!(HashSet<T>);
// These three are interdependent, if you plan to put jsmanaged data
// in one of these make sure it is propagated properly to containing structs
no_jsmanaged_fields!(SubpageId, WindowSizeData, PipelineId);
no_jsmanaged_fields!(WorkerId);
no_jsmanaged_fields!(QuirksMode);
no_jsmanaged_fields!(Runtime);
no_jsmanaged_fields!(Headers, Method);
no_jsmanaged_fields!(ConstellationChan);
no_jsmanaged_fields!(LayoutChan);
no_jsmanaged_fields!(WindowProxyHandler);
no_jsmanaged_fields!(UntrustedNodeAddress);
no_jsmanaged_fields!(LengthOrPercentageOrAuto);
no_jsmanaged_fields!(RGBA);
no_jsmanaged_fields!(Matrix2D<T>);
no_jsmanaged_fields!(StorageType);
no_jsmanaged_fields!(CanvasGradientStop, LinearGradientStyle, RadialGradientStyle);
no_jsmanaged_fields!(LineCapStyle, LineJoinStyle, CompositionOrBlending);
no_jsmanaged_fields!(RepetitionStyle);
impl JSTraceable for Box<ScriptChan+Send> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<Fn(f64, )> {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
// Do nothing
}
}
impl<'a> JSTraceable for &'a str {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl<A,B> JSTraceable for fn(A) -> B {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<ScriptListener+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for Box<LayoutRPC+'static> {
#[inline]
fn trace(&self, _: *mut JSTracer) {
// Do nothing
}
}
impl JSTraceable for () {
#[inline]
fn trace(&self, _trc: *mut JSTracer) {
}
}
/// Holds a set of vectors that need to be rooted
pub struct RootedCollectionSet {
set: Vec<HashSet<*const RootedVec<Void>>>
}
/// TLV Holds a set of vectors that need to be rooted
thread_local!(pub static ROOTED_COLLECTIONS: Rc<RefCell<RootedCollectionSet>> =
Rc::new(RefCell::new(RootedCollectionSet::new())));
/// Type of `RootedVec`
pub enum CollectionType {
/// DOM objects
DOMObjects,
/// `JSVal`s
JSVals,
/// `*mut JSObject`s
JSObjects,
}
impl RootedCollectionSet {
fn new() -> RootedCollectionSet {
RootedCollectionSet {
set: vec!(HashSet::new(), HashSet::new(), HashSet::new())
}
}
fn remove<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
assert!(collections.set[type_ as usize].remove(&(collection as *const _ as *const _)));
});
}
fn add<T: VecRootableType>(collection: &RootedVec<T>) {
ROOTED_COLLECTIONS.with(|ref collections| {
let type_ = VecRootableType::tag(None::<T>);
let mut collections = collections.borrow_mut();
collections.set[type_ as usize].insert(collection as *const _ as *const _);
})
}
unsafe fn trace(&self, tracer: *mut JSTracer) {
fn trace_collection_type<T>(tracer: *mut JSTracer,
collections: &HashSet<*const RootedVec<Void>>)
where T: JSTraceable + VecRootableType
{
for collection in collections {
let collection: *const RootedVec<Void> = *collection;
let collection = collection as *const RootedVec<T>;
unsafe {
let _ = (*collection).trace(tracer);
}
}
}
let dom_collections =
&self.set[CollectionType::DOMObjects as usize] as *const _ as *const HashSet<*const RootedVec<JS<Void>>>;
for dom_collection in (*dom_collections).iter() {
for reflector in (**dom_collection).iter() {
trace_reflector(tracer, "", reflector.reflector());
}
}
trace_collection_type::<JSVal>(tracer, &self.set[CollectionType::JSVals as usize]);
trace_collection_type::<*mut JSObject>(tracer, &self.set[CollectionType::JSObjects as usize]);
}
}
/// Trait implemented by all types that can be used with RootedVec
pub trait VecRootableType {
/// Return the type tag used to determine how to trace RootedVec
fn tag(_a: Option<Self>) -> CollectionType;
}
impl<T: Reflectable> VecRootableType for JS<T> {
fn tag(_a: Option<JS<T>>) -> CollectionType { CollectionType::DOMObjects }
}
impl VecRootableType for JSVal {
fn tag(_a: Option<JSVal>) -> CollectionType { CollectionType::JSVals }
}
impl VecRootableType for *mut JSObject {
fn tag(_a: Option<*mut JSObject>) -> CollectionType { CollectionType::JSObjects }
}
enum Void {}
impl VecRootableType for Void {
fn tag(_a: Option<Void>) -> CollectionType { unreachable!() }
}
impl Reflectable for Void {
fn reflector<'a>(&'a self) -> &'a Reflector { unreachable!() }
}
/// A vector of items that are rooted for the lifetime
/// of this struct
#[allow(unrooted_must_root)]
#[no_move]
pub struct RootedVec<T: VecRootableType> {
v: Vec<T>
}
impl<T: VecRootableType> RootedVec<T> {
/// Create a vector of items of type T that is rooted for
/// the lifetime of this struct
pub fn new() -> RootedVec<T> {
let addr = unsafe {
return_address() as *const libc::c_void
};
RootedVec::new_with_destination_address(addr)
}
/// Create a vector of items of type T. This constructor is specific
/// for RootCollection.
pub fn new_with_destination_address(addr: *const libc::c_void) -> RootedVec<T> {
unsafe {
RootedCollectionSet::add::<T>(&*(addr as *const _));
}
RootedVec::<T> { v: vec!() }
}
}
impl<T: VecRootableType> Drop for RootedVec<T> {
fn drop(&mut self) {
RootedCollectionSet::remove(self);
}
}
impl<T: VecRootableType> Deref for RootedVec<T> {
type Target = Vec<T>;
fn deref(&self) -> &Vec<T> {
&self.v
}
}
impl<T: VecRootableType> DerefMut for RootedVec<T> {
fn deref_mut(&mut self) -> &mut Vec<T> {
&mut self.v
}
}
/// SM Callback that traces the rooted collections
pub unsafe fn trace_collections(tracer: *mut JSTracer) {
ROOTED_COLLECTIONS.with(|ref collections| {
let collections = collections.borrow();
collections.trace(tracer);
});
}
|
{
return;
}
|
conditional_block
|
main.rs
|
#![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::Path;
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: core::SessionRef) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
println!("MATCH {};{};{};{};{};{:?};{}", m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
}
#[cfg(not(test))]
fn match_fn(m: Match, session: core::SessionRef) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
println!("MATCH {},{},{},{},{:?},{}", m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(args: Vec<String>, print_type: CompletePrinter) {
if args.len() < 1 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
match args[0].parse::<usize>() {
Ok(linenum) => {
complete_by_line_coords(args, linenum, print_type);
}
Err(_) => {
external_complete(&args);
}
}
}
#[cfg(not(test))]
fn
|
(args: Vec<String>,
linenum: usize,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(args, linenum, print_type);
}).unwrap();
match res.join() {
Ok(_) => {},
Err(e) => {
error!("Search thread paniced: {:?}", e);
}
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(args: Vec<String>, linenum: usize, print_type: CompletePrinter) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let line = &getline(&substitute_file, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
let point = scopes::coords_to_point(&src, linenum, charnum);
for m in core::complete_from_file(&src, &fpath, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session),
};
}
}
#[cfg(not(test))]
fn external_complete(args: &[String]) {
// input: a command line string passed in
let arg = &args[0];
let it = arg.split("::");
let p: Vec<&str> = it.collect();
let session = core::Session::from_path(&Path::new("."), &Path::new("."));
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session);
}
}
}
}
#[cfg(not(test))]
fn prefix(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
// print the start, end, and the identifier prefix being matched
let path = Path::new(fname);
let line = &getline(&path, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
}
#[cfg(not(test))]
fn find_definition(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let pos = scopes::coords_to_point(&src, linenum, charnum);
core::find_definition(&src, &fpath, pos, &session).map(|m| match_fn(m, &session));
println!("END");
}
#[cfg(not(test))]
fn print_usage() {
let program = std::env::args().next().unwrap().clone();
println!("usage: {} complete linenum charnum fname [substitute_file]", program);
println!("or: {} find-definition linenum charnum fname [substitute_file]", program);
println!("or: {} complete fullyqualifiedname (e.g. std::io::)", program);
println!("or: {} prefix linenum charnum fname", program);
println!("or replace complete with complete-with-snippet for more detailed completions.");
println!("or: {} daemon - to start a process that receives the above commands via stdin", program);
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
        if !v.is_empty() {
let f = Path::new(v[0]);
            if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
            } else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon() {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
if n == 0 {
break;
}
let args: Vec<String> = input.split(" ").map(|s| s.trim().to_string()).collect();
run(args);
input.clear();
}
}
#[cfg(not(test))]
fn main() {
    // make sure we get a stack trace if we panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let mut args: Vec<String> = std::env::args().collect();
if args.len() == 1 {
print_usage();
std::process::exit(1);
}
args.remove(0);
run(args);
}
#[cfg(not(test))]
fn run(mut args: Vec<String>) {
//let command = &args[0];
let command = args.remove(0);
match &command[..] {
"daemon" => daemon(),
"prefix" => prefix(&args),
"complete" => complete(args, CompletePrinter::Normal),
"complete-with-snippet" => complete(args, CompletePrinter::WithSnippets),
"find-definition" => find_definition(&args),
"help" => print_usage(),
cmd => {
println!("Sorry, I didn't understand command {}", cmd);
print_usage();
std::process::exit(1);
}
}
}
|
complete_by_line_coords
|
identifier_name
|
main.rs
|
#![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::Path;
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: core::SessionRef) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
println!("MATCH {};{};{};{};{};{:?};{}", m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
}
#[cfg(not(test))]
fn match_fn(m: Match, session: core::SessionRef) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
println!("MATCH {},{},{},{},{:?},{}", m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(args: Vec<String>, print_type: CompletePrinter) {
if args.len() < 1 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
match args[0].parse::<usize>() {
Ok(linenum) => {
complete_by_line_coords(args, linenum, print_type);
}
Err(_) => {
external_complete(&args);
}
}
}
#[cfg(not(test))]
fn complete_by_line_coords(args: Vec<String>,
linenum: usize,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
|
match res.join() {
Ok(_) => {},
Err(e) => {
error!("Search thread paniced: {:?}", e);
}
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(args: Vec<String>, linenum: usize, print_type: CompletePrinter) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let line = &getline(&substitute_file, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
let point = scopes::coords_to_point(&src, linenum, charnum);
for m in core::complete_from_file(&src, &fpath, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session),
};
}
}
#[cfg(not(test))]
fn external_complete(args: &[String]) {
// input: a command line string passed in
let arg = &args[0];
let it = arg.split("::");
let p: Vec<&str> = it.collect();
let session = core::Session::from_path(&Path::new("."), &Path::new("."));
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session);
}
}
}
}
#[cfg(not(test))]
fn prefix(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
// print the start, end, and the identifier prefix being matched
let path = Path::new(fname);
let line = &getline(&path, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
}
#[cfg(not(test))]
fn find_definition(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let pos = scopes::coords_to_point(&src, linenum, charnum);
core::find_definition(&src, &fpath, pos, &session).map(|m| match_fn(m, &session));
println!("END");
}
#[cfg(not(test))]
fn print_usage() {
let program = std::env::args().next().unwrap().clone();
println!("usage: {} complete linenum charnum fname [substitute_file]", program);
println!("or: {} find-definition linenum charnum fname [substitute_file]", program);
println!("or: {} complete fullyqualifiedname (e.g. std::io::)", program);
println!("or: {} prefix linenum charnum fname", program);
println!("or replace complete with complete-with-snippet for more detailed completions.");
println!("or: {} daemon - to start a process that receives the above commands via stdin", program);
}
#[cfg(not(test))]
fn check_rust_src_env_var() {
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
        if !v.is_empty() {
let f = Path::new(v[0]);
            if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
            } else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
#[cfg(not(test))]
fn daemon() {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
if n == 0 {
break;
}
let args: Vec<String> = input.split(" ").map(|s| s.trim().to_string()).collect();
run(args);
input.clear();
}
}
#[cfg(not(test))]
fn main() {
    // make sure we get a stack trace if we panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let mut args: Vec<String> = std::env::args().collect();
if args.len() == 1 {
print_usage();
std::process::exit(1);
}
args.remove(0);
run(args);
}
#[cfg(not(test))]
fn run(mut args: Vec<String>) {
//let command = &args[0];
let command = args.remove(0);
match &command[..] {
"daemon" => daemon(),
"prefix" => prefix(&args),
"complete" => complete(args, CompletePrinter::Normal),
"complete-with-snippet" => complete(args, CompletePrinter::WithSnippets),
"find-definition" => find_definition(&args),
"help" => print_usage(),
cmd => {
println!("Sorry, I didn't understand command {}", cmd);
print_usage();
std::process::exit(1);
}
}
}
|
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(args, linenum, print_type);
}).unwrap();
|
random_line_split
|
main.rs
|
#![cfg_attr(all(test, feature = "nightly"), feature(test))] // we only need test feature when testing
#[macro_use] extern crate log;
extern crate syntex_syntax;
extern crate toml;
extern crate env_logger;
extern crate racer;
#[cfg(not(test))]
use racer::core;
#[cfg(not(test))]
use racer::util;
#[cfg(not(test))]
use racer::core::Match;
#[cfg(not(test))]
use racer::util::{getline, path_exists};
#[cfg(not(test))]
use racer::nameres::{do_file_search, do_external_search, PATH_SEP};
#[cfg(not(test))]
use racer::scopes;
#[cfg(not(test))]
use std::path::Path;
#[cfg(not(test))]
fn match_with_snippet_fn(m: Match, session: core::SessionRef) {
let (linenum, charnum) = scopes::point_to_coords_from_file(&m.filepath, m.point, session).unwrap();
if m.matchstr == "" {
panic!("MATCHSTR is empty - waddup?");
}
let snippet = racer::snippets::snippet_for_match(&m, session);
println!("MATCH {};{};{};{};{};{:?};{}", m.matchstr,
snippet,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
}
#[cfg(not(test))]
fn match_fn(m: Match, session: core::SessionRef) {
if let Some((linenum, charnum)) = scopes::point_to_coords_from_file(&m.filepath,
m.point,
session) {
println!("MATCH {},{},{},{},{:?},{}", m.matchstr,
linenum.to_string(),
charnum.to_string(),
m.filepath.to_str().unwrap(),
m.mtype,
m.contextstr
);
} else {
error!("Could not resolve file coords for match {:?}", m);
}
}
#[cfg(not(test))]
fn complete(args: Vec<String>, print_type: CompletePrinter) {
if args.len() < 1 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
match args[0].parse::<usize>() {
Ok(linenum) => {
complete_by_line_coords(args, linenum, print_type);
}
Err(_) => {
external_complete(&args);
}
}
}
#[cfg(not(test))]
fn complete_by_line_coords(args: Vec<String>,
linenum: usize,
print_type: CompletePrinter) {
// input: linenum, colnum, fname
let tb = std::thread::Builder::new().name("searcher".to_string());
// PD: this probably sucks for performance, but lots of plugins
// end up failing and leaving tmp files around if racer crashes,
// so catch the crash.
let res = tb.spawn(move || {
run_the_complete_fn(args, linenum, print_type);
}).unwrap();
match res.join() {
Ok(_) => {},
Err(e) => {
error!("Search thread paniced: {:?}", e);
}
}
println!("END");
}
#[cfg(not(test))]
enum CompletePrinter {
Normal,
WithSnippets
}
#[cfg(not(test))]
fn run_the_complete_fn(args: Vec<String>, linenum: usize, print_type: CompletePrinter) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let line = &getline(&substitute_file, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
let point = scopes::coords_to_point(&src, linenum, charnum);
for m in core::complete_from_file(&src, &fpath, point, &session) {
match print_type {
CompletePrinter::Normal => match_fn(m, &session),
CompletePrinter::WithSnippets => match_with_snippet_fn(m, &session),
};
}
}
#[cfg(not(test))]
fn external_complete(args: &[String]) {
// input: a command line string passed in
let arg = &args[0];
let it = arg.split("::");
let p: Vec<&str> = it.collect();
let session = core::Session::from_path(&Path::new("."), &Path::new("."));
for m in do_file_search(p[0], &Path::new(".")) {
if p.len() == 1 {
match_fn(m, &session);
} else {
for m in do_external_search(&p[1..], &m.filepath, m.point,
core::SearchType::StartsWith,
core::Namespace::BothNamespaces, &session) {
match_fn(m, &session);
}
}
}
}
#[cfg(not(test))]
fn prefix(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
// print the start, end, and the identifier prefix being matched
let path = Path::new(fname);
let line = &getline(&path, linenum, &session);
let (start, pos) = util::expand_ident(line, charnum);
println!("PREFIX {},{},{}", start, pos, &line[start..pos]);
}
#[cfg(not(test))]
fn find_definition(args: &[String]) {
if args.len() < 3 {
println!("Provide more arguments!");
print_usage();
std::process::exit(1);
}
let linenum = args[0].parse::<usize>().unwrap();
let charnum = args[1].parse::<usize>().unwrap();
let fname = &args[2];
let substitute_file = Path::new(match args.len() > 3 {
true => &args[3],
false => fname
});
let fpath = Path::new(&fname);
let session = core::Session::from_path(&fpath, &substitute_file);
let src = session.load_file(&fpath);
let pos = scopes::coords_to_point(&src, linenum, charnum);
core::find_definition(&src, &fpath, pos, &session).map(|m| match_fn(m, &session));
println!("END");
}
#[cfg(not(test))]
fn print_usage() {
let program = std::env::args().next().unwrap().clone();
println!("usage: {} complete linenum charnum fname [substitute_file]", program);
println!("or: {} find-definition linenum charnum fname [substitute_file]", program);
println!("or: {} complete fullyqualifiedname (e.g. std::io::)", program);
println!("or: {} prefix linenum charnum fname", program);
println!("or replace complete with complete-with-snippet for more detailed completions.");
println!("or: {} daemon - to start a process that receives the above commands via stdin", program);
}
#[cfg(not(test))]
fn check_rust_src_env_var()
|
#[cfg(not(test))]
fn daemon() {
use std::io;
let mut input = String::new();
while let Ok(n) = io::stdin().read_line(&mut input) {
if n == 0 {
break;
}
let args: Vec<String> = input.split(" ").map(|s| s.trim().to_string()).collect();
run(args);
input.clear();
}
}
#[cfg(not(test))]
fn main() {
    // make sure we get a stack trace if we panic
::std::env::set_var("RUST_BACKTRACE","1");
env_logger::init().unwrap();
check_rust_src_env_var();
let mut args: Vec<String> = std::env::args().collect();
if args.len() == 1 {
print_usage();
std::process::exit(1);
}
args.remove(0);
run(args);
}
#[cfg(not(test))]
fn run(mut args: Vec<String>) {
//let command = &args[0];
let command = args.remove(0);
match &command[..] {
"daemon" => daemon(),
"prefix" => prefix(&args),
"complete" => complete(args, CompletePrinter::Normal),
"complete-with-snippet" => complete(args, CompletePrinter::WithSnippets),
"find-definition" => find_definition(&args),
"help" => print_usage(),
cmd => {
println!("Sorry, I didn't understand command {}", cmd);
print_usage();
std::process::exit(1);
}
}
}
|
{
if let Ok(srcpaths) = std::env::var("RUST_SRC_PATH") {
let v = srcpaths.split(PATH_SEP).collect::<Vec<_>>();
if !v.is_empty() {
let f = Path::new(v[0]);
if !path_exists(f) {
println!("racer can't find the directory pointed to by the RUST_SRC_PATH variable \"{}\". Try using an absolute fully qualified path and make sure it points to the src directory of a rust checkout - e.g. \"/home/foouser/src/rust/src\".", srcpaths);
std::process::exit(1);
} else if !path_exists(f.join("libstd")) {
println!("Unable to find libstd under RUST_SRC_PATH. N.B. RUST_SRC_PATH variable needs to point to the *src* directory inside a rust checkout e.g. \"/home/foouser/src/rust/src\". Current value \"{}\"", srcpaths);
std::process::exit(1);
}
}
} else {
println!("RUST_SRC_PATH environment variable must be set to point to the src directory of a rust checkout. E.g. \"/home/foouser/src/rust/src\"");
std::process::exit(1);
}
}
|
identifier_body
|
expf32.rs
|
#![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::expf32;
use core::num::Float;
use core::f32;
// pub fn expf32(x: f32) -> f32;
#[test]
fn expf32_test1()
|
#[test]
fn expf32_test2() {
let x: f32 = f32::infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, f32::infinity());
}
#[test]
fn expf32_test3() {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn expf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 2.7182817);
}
}
|
{
let x: f32 = f32::nan();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result.is_nan(), true);
}
|
identifier_body
|
expf32.rs
|
#![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::expf32;
use core::num::Float;
use core::f32;
// pub fn expf32(x: f32) -> f32;
#[test]
fn expf32_test1() {
let x: f32 = f32::nan();
let result: f32 = unsafe { expf32(x) };
|
}
#[test]
fn expf32_test2() {
let x: f32 = f32::infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, f32::infinity());
}
#[test]
fn expf32_test3() {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn expf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 2.7182817);
}
}
|
assert_eq!(result.is_nan(), true);
|
random_line_split
|
expf32.rs
|
#![feature(core, core_intrinsics, core_float)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::expf32;
use core::num::Float;
use core::f32;
// pub fn expf32(x: f32) -> f32;
#[test]
fn expf32_test1() {
let x: f32 = f32::nan();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result.is_nan(), true);
}
#[test]
fn
|
() {
let x: f32 = f32::infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, f32::infinity());
}
#[test]
fn expf32_test3() {
let x: f32 = f32::neg_infinity();
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 0.0);
}
#[test]
fn expf32_test4() {
let x: f32 = 1.0;
let result: f32 = unsafe { expf32(x) };
assert_eq!(result, 2.7182817);
}
}
|
expf32_test2
|
identifier_name
|
WalkingState.rs
|
# Velocity
# AngularVelocity
# Relative Orientation
# Relative Angular Velocity
#----------------
# Heading
-0.000295
# Root(pelvis)
0 0.943719 0
0.999705 0.001812 0.000000 -0.024203
0.085812 0.006683 0.046066
0.020659 0.025804 -0.043029
# pelvis_lowerback
0.999456 -0.008591 -0.000383 0.031834
0.084348 -0.020366 0.074172
# lHip
0.995073 -0.098970 0.000633 -0.005810
0.889784 -0.036860 0.180944
# rHip
0.998479 -0.002838 0.002773 0.054987
0.569180 0.030032 -0.066712
# lowerback_torso
0.999995 -0.002676 0.000476 0.001554
-0.040065 0.001500 0.000981
# lKnee
0.986009 0.166690 0.000000 0.000000
-0.685415 0.000022 0.000004
# rKnee
0.999983 0.005818 -0.000000 0.000000
-1.019084 -0.000031 0.000003
# torso_head
0.999937 0.004552 -0.009214 -0.004602
-0.072454 -0.033109 -0.015390
# lShoulder
0.702761 0.005656 0.005498 -0.711382
-0.070070 0.001655 -0.040434
# rShoulder
0.711961 0.005361 -0.005571 0.702177
-0.067033 0.001650 -0.040399
# lAnkle
0.997092 -0.070354 0.002061 0.029209
-0.207174 -0.019407 -0.136886
# rAnkle
0.999514 -0.004680 -0.000144 -0.030818
0.556648 0.002139 0.223915
# lElbow
1.000000 0.000000 -0.000033 -0.000000
0.000000 -0.015576 -0.000000
# rElbow
1.000000 -0.000000 0.000032 -0.000000
-0.000000 0.015304 -0.000000
# lToeJoint
0.999987 0.005036 0.000000 -0.000000
0.119983 -0.000000 -0.000000
# rToeJoint
1.000000 -0.000067 -0.000000 -0.000000
-0.149168 0.000018 -0.000000
|
# order is:
# Heading
# Position
# Orientation
|
random_line_split
|
|
vendor.rs
|
use crate::command_prelude::*;
use cargo::ops;
use std::path::PathBuf;
pub fn cli() -> App {
subcommand("vendor")
.about("Vendor all dependencies for a project locally")
.arg(opt("quiet", "No output printed to stdout").short("q"))
.arg_manifest_path()
.arg(Arg::with_name("path").help("Where to vendor crates (`vendor` by default)"))
.arg(
Arg::with_name("no-delete")
.long("no-delete")
.help("Don't delete older crates in the vendor directory"),
)
.arg(
Arg::with_name("tomls")
.short("s")
.long("sync")
.help("Additional `Cargo.toml` to sync and vendor")
.value_name("TOML")
.multiple(true),
)
.arg(
Arg::with_name("respect-source-config")
.long("respect-source-config")
.help("Respect `[source]` config in `.cargo/config`")
.multiple(true),
)
.arg(
Arg::with_name("versioned-dirs")
.long("versioned-dirs")
.help("Always include version in subdir name"),
)
.arg(
Arg::with_name("no-merge-sources")
.long("no-merge-sources")
.hidden(true),
)
.arg(
Arg::with_name("relative-path")
.long("relative-path")
.hidden(true),
)
.arg(
Arg::with_name("only-git-deps")
.long("only-git-deps")
.hidden(true),
)
.arg(
Arg::with_name("disallow-duplicates")
.long("disallow-duplicates")
.hidden(true),
)
.after_help("Run `cargo help vendor` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult
|
} else {
None
};
if let Some(flag) = crates_io_cargo_vendor_flag {
return Err(anyhow::format_err!(
"\
the crates.io `cargo vendor` command has now been merged into Cargo itself
and does not support the flag `{}` currently; to continue using the flag you
can execute `cargo-vendor vendor...`, and if you would like to see this flag
supported in Cargo itself please feel free to file an issue at
https://github.com/rust-lang/cargo/issues/new
",
flag
)
.into());
}
let ws = args.workspace(config)?;
let path = args
.value_of_os("path")
.map(|val| PathBuf::from(val.to_os_string()))
.unwrap_or_else(|| PathBuf::from("vendor"));
ops::vendor(
&ws,
&ops::VendorOptions {
no_delete: args.is_present("no-delete"),
destination: &path,
versioned_dirs: args.is_present("versioned-dirs"),
extra: args
.values_of_os("tomls")
.unwrap_or_default()
.map(|s| PathBuf::from(s.to_os_string()))
.collect(),
},
)?;
Ok(())
}
|
{
// We're doing the vendoring operation ourselves, so we don't actually want
// to respect any of the `source` configuration in Cargo itself. That's
// intended for other consumers of Cargo, but we want to go straight to the
// source, e.g. crates.io, to fetch crates.
if !args.is_present("respect-source-config") {
config.values_mut()?.remove("source");
}
// When we moved `cargo vendor` into Cargo itself we didn't stabilize a few
// flags, so try to provide a helpful error message in that case to ensure
// that users currently using the flag aren't tripped up.
let crates_io_cargo_vendor_flag = if args.is_present("no-merge-sources") {
Some("--no-merge-sources")
} else if args.is_present("relative-path") {
Some("--relative-path")
} else if args.is_present("only-git-deps") {
Some("--only-git-deps")
} else if args.is_present("disallow-duplicates") {
Some("--disallow-duplicates")
|
identifier_body
|
vendor.rs
|
use crate::command_prelude::*;
use cargo::ops;
use std::path::PathBuf;
pub fn cli() -> App {
subcommand("vendor")
.about("Vendor all dependencies for a project locally")
.arg(opt("quiet", "No output printed to stdout").short("q"))
.arg_manifest_path()
.arg(Arg::with_name("path").help("Where to vendor crates (`vendor` by default)"))
.arg(
Arg::with_name("no-delete")
.long("no-delete")
.help("Don't delete older crates in the vendor directory"),
)
.arg(
Arg::with_name("tomls")
.short("s")
.long("sync")
.help("Additional `Cargo.toml` to sync and vendor")
.value_name("TOML")
.multiple(true),
)
.arg(
Arg::with_name("respect-source-config")
.long("respect-source-config")
.help("Respect `[source]` config in `.cargo/config`")
.multiple(true),
)
.arg(
Arg::with_name("versioned-dirs")
.long("versioned-dirs")
.help("Always include version in subdir name"),
)
.arg(
Arg::with_name("no-merge-sources")
.long("no-merge-sources")
.hidden(true),
)
.arg(
Arg::with_name("relative-path")
.long("relative-path")
.hidden(true),
)
.arg(
Arg::with_name("only-git-deps")
.long("only-git-deps")
.hidden(true),
)
.arg(
Arg::with_name("disallow-duplicates")
.long("disallow-duplicates")
.hidden(true),
)
.after_help("Run `cargo help vendor` for more detailed information.\n")
}
pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
// We're doing the vendoring operation ourselves, so we don't actually want
// to respect any of the `source` configuration in Cargo itself. That's
// intended for other consumers of Cargo, but we want to go straight to the
// source, e.g. crates.io, to fetch crates.
    if !args.is_present("respect-source-config") {
config.values_mut()?.remove("source");
}
// When we moved `cargo vendor` into Cargo itself we didn't stabilize a few
// flags, so try to provide a helpful error message in that case to ensure
|
} else if args.is_present("only-git-deps") {
Some("--only-git-deps")
} else if args.is_present("disallow-duplicates") {
Some("--disallow-duplicates")
} else {
None
};
if let Some(flag) = crates_io_cargo_vendor_flag {
return Err(anyhow::format_err!(
"\
the crates.io `cargo vendor` command has now been merged into Cargo itself
and does not support the flag `{}` currently; to continue using the flag you
can execute `cargo-vendor vendor...`, and if you would like to see this flag
supported in Cargo itself please feel free to file an issue at
https://github.com/rust-lang/cargo/issues/new
",
flag
)
.into());
}
let ws = args.workspace(config)?;
let path = args
.value_of_os("path")
.map(|val| PathBuf::from(val.to_os_string()))
.unwrap_or_else(|| PathBuf::from("vendor"));
ops::vendor(
&ws,
&ops::VendorOptions {
no_delete: args.is_present("no-delete"),
destination: &path,
versioned_dirs: args.is_present("versioned-dirs"),
extra: args
.values_of_os("tomls")
.unwrap_or_default()
.map(|s| PathBuf::from(s.to_os_string()))
.collect(),
},
)?;
Ok(())
}
|
// that users currently using the flag aren't tripped up.
let crates_io_cargo_vendor_flag = if args.is_present("no-merge-sources") {
Some("--no-merge-sources")
} else if args.is_present("relative-path") {
Some("--relative-path")
|
random_line_split
|
vendor.rs
|
use crate::command_prelude::*;
use cargo::ops;
use std::path::PathBuf;
pub fn cli() -> App {
subcommand("vendor")
.about("Vendor all dependencies for a project locally")
.arg(opt("quiet", "No output printed to stdout").short("q"))
.arg_manifest_path()
.arg(Arg::with_name("path").help("Where to vendor crates (`vendor` by default)"))
.arg(
Arg::with_name("no-delete")
.long("no-delete")
.help("Don't delete older crates in the vendor directory"),
)
.arg(
Arg::with_name("tomls")
.short("s")
.long("sync")
.help("Additional `Cargo.toml` to sync and vendor")
.value_name("TOML")
.multiple(true),
)
.arg(
Arg::with_name("respect-source-config")
.long("respect-source-config")
.help("Respect `[source]` config in `.cargo/config`")
.multiple(true),
)
.arg(
Arg::with_name("versioned-dirs")
.long("versioned-dirs")
.help("Always include version in subdir name"),
)
.arg(
Arg::with_name("no-merge-sources")
.long("no-merge-sources")
.hidden(true),
)
.arg(
Arg::with_name("relative-path")
.long("relative-path")
.hidden(true),
)
.arg(
Arg::with_name("only-git-deps")
.long("only-git-deps")
.hidden(true),
)
.arg(
Arg::with_name("disallow-duplicates")
.long("disallow-duplicates")
.hidden(true),
)
.after_help("Run `cargo help vendor` for more detailed information.\n")
}
pub fn
|
(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
// We're doing the vendoring operation ourselves, so we don't actually want
// to respect any of the `source` configuration in Cargo itself. That's
// intended for other consumers of Cargo, but we want to go straight to the
// source, e.g. crates.io, to fetch crates.
    if !args.is_present("respect-source-config") {
config.values_mut()?.remove("source");
}
// When we moved `cargo vendor` into Cargo itself we didn't stabilize a few
// flags, so try to provide a helpful error message in that case to ensure
// that users currently using the flag aren't tripped up.
let crates_io_cargo_vendor_flag = if args.is_present("no-merge-sources") {
Some("--no-merge-sources")
} else if args.is_present("relative-path") {
Some("--relative-path")
} else if args.is_present("only-git-deps") {
Some("--only-git-deps")
} else if args.is_present("disallow-duplicates") {
Some("--disallow-duplicates")
} else {
None
};
if let Some(flag) = crates_io_cargo_vendor_flag {
return Err(anyhow::format_err!(
"\
the crates.io `cargo vendor` command has now been merged into Cargo itself
and does not support the flag `{}` currently; to continue using the flag you
can execute `cargo-vendor vendor...`, and if you would like to see this flag
supported in Cargo itself please feel free to file an issue at
https://github.com/rust-lang/cargo/issues/new
",
flag
)
.into());
}
let ws = args.workspace(config)?;
let path = args
.value_of_os("path")
.map(|val| PathBuf::from(val.to_os_string()))
.unwrap_or_else(|| PathBuf::from("vendor"));
ops::vendor(
&ws,
&ops::VendorOptions {
no_delete: args.is_present("no-delete"),
destination: &path,
versioned_dirs: args.is_present("versioned-dirs"),
extra: args
.values_of_os("tomls")
.unwrap_or_default()
.map(|s| PathBuf::from(s.to_os_string()))
.collect(),
},
)?;
Ok(())
}
|
exec
|
identifier_name
|
borrowed-struct.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print *stack_val_ref
// gdb-check:$1 = {x = 10, y = 23.5}
// gdb-command:print *stack_val_interior_ref_1
// gdb-check:$2 = 10
// gdb-command:print *stack_val_interior_ref_2
// gdb-check:$3 = 23.5
// gdb-command:print *ref_to_unnamed
// gdb-check:$4 = {x = 11, y = 24.5}
// gdb-command:print *unique_val_ref
// gdb-check:$5 = {x = 13, y = 26.5}
// gdb-command:print *unique_val_interior_ref_1
// gdb-check:$6 = 13
// gdb-command:print *unique_val_interior_ref_2
// gdb-check:$7 = 26.5
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *stack_val_ref
// lldb-check:[...]$0 = SomeStruct { x: 10, y: 23.5 }
// lldb-command:print *stack_val_interior_ref_1
// lldb-check:[...]$1 = 10
// lldb-command:print *stack_val_interior_ref_2
|
// lldb-command:print *unique_val_ref
// lldb-check:[...]$4 = SomeStruct { x: 13, y: 26.5 }
// lldb-command:print *unique_val_interior_ref_1
// lldb-check:[...]$5 = 13
// lldb-command:print *unique_val_interior_ref_2
// lldb-check:[...]$6 = 26.5
#![allow(unused_variable)]
struct SomeStruct {
x: int,
y: f64
}
fn main() {
let stack_val: SomeStruct = SomeStruct { x: 10, y: 23.5 };
let stack_val_ref: &SomeStruct = &stack_val;
let stack_val_interior_ref_1: &int = &stack_val.x;
let stack_val_interior_ref_2: &f64 = &stack_val.y;
let ref_to_unnamed: &SomeStruct = &SomeStruct { x: 11, y: 24.5 };
let unique_val = box SomeStruct { x: 13, y: 26.5 };
let unique_val_ref: &SomeStruct = &*unique_val;
let unique_val_interior_ref_1: &int = &unique_val.x;
let unique_val_interior_ref_2: &f64 = &unique_val.y;
zzz(); // #break
}
fn zzz() {()}
|
// lldb-check:[...]$2 = 23.5
// lldb-command:print *ref_to_unnamed
// lldb-check:[...]$3 = SomeStruct { x: 11, y: 24.5 }
|
random_line_split
|
borrowed-struct.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print *stack_val_ref
// gdb-check:$1 = {x = 10, y = 23.5}
// gdb-command:print *stack_val_interior_ref_1
// gdb-check:$2 = 10
// gdb-command:print *stack_val_interior_ref_2
// gdb-check:$3 = 23.5
// gdb-command:print *ref_to_unnamed
// gdb-check:$4 = {x = 11, y = 24.5}
// gdb-command:print *unique_val_ref
// gdb-check:$5 = {x = 13, y = 26.5}
// gdb-command:print *unique_val_interior_ref_1
// gdb-check:$6 = 13
// gdb-command:print *unique_val_interior_ref_2
// gdb-check:$7 = 26.5
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print *stack_val_ref
// lldb-check:[...]$0 = SomeStruct { x: 10, y: 23.5 }
// lldb-command:print *stack_val_interior_ref_1
// lldb-check:[...]$1 = 10
// lldb-command:print *stack_val_interior_ref_2
// lldb-check:[...]$2 = 23.5
// lldb-command:print *ref_to_unnamed
// lldb-check:[...]$3 = SomeStruct { x: 11, y: 24.5 }
// lldb-command:print *unique_val_ref
// lldb-check:[...]$4 = SomeStruct { x: 13, y: 26.5 }
// lldb-command:print *unique_val_interior_ref_1
// lldb-check:[...]$5 = 13
// lldb-command:print *unique_val_interior_ref_2
// lldb-check:[...]$6 = 26.5
#![allow(unused_variable)]
struct
|
{
x: int,
y: f64
}
fn main() {
let stack_val: SomeStruct = SomeStruct { x: 10, y: 23.5 };
let stack_val_ref: &SomeStruct = &stack_val;
let stack_val_interior_ref_1: &int = &stack_val.x;
let stack_val_interior_ref_2: &f64 = &stack_val.y;
let ref_to_unnamed: &SomeStruct = &SomeStruct { x: 11, y: 24.5 };
let unique_val = box SomeStruct { x: 13, y: 26.5 };
let unique_val_ref: &SomeStruct = &*unique_val;
let unique_val_interior_ref_1: &int = &unique_val.x;
let unique_val_interior_ref_2: &f64 = &unique_val.y;
zzz(); // #break
}
fn zzz() {()}
|
SomeStruct
|
identifier_name
|
mem.rs
|
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
            if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
    /// For interior nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &String) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
        if !self.children.is_empty() {
            // Interior node. Derive its size from its children.
            if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
        if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest
|
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
        if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() &&!b.children.is_empty() {
Ordering::Greater
} else if!a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
            if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
        if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
        if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
|
{
ReportsForest {
trees: HashMap::new(),
}
}
|
identifier_body
|
mem.rs
|
reporters: HashMap<String, Reporter>,
}
const JEMALLOC_HEAP_ALLOCATED_STR: &'static str = "jemalloc-heap-allocated";
const SYSTEM_HEAP_ALLOCATED_STR: &'static str = "system-heap-allocated";
impl Profiler {
pub fn create(period: Option<f64>) -> ProfilerChan {
let (chan, port) = ipc::channel().unwrap();
// Create the timer thread if a period was provided.
if let Some(period) = period {
let period_ms = (period * 1000.) as u32;
let chan = chan.clone();
spawn_named("Memory profiler timer".to_owned(), move || {
loop {
sleep_ms(period_ms);
if chan.send(ProfilerMsg::Print).is_err() {
break;
}
}
});
}
// Always spawn the memory profiler. If there is no timer thread it won't receive regular
// `Print` events, but it will still receive the other events.
spawn_named("Memory profiler".to_owned(), move || {
let mut mem_profiler = Profiler::new(port);
mem_profiler.start();
});
let mem_profiler_chan = ProfilerChan(chan);
// Register the system memory reporter, which will run on its own thread. It never needs to
// be unregistered, because as long as the memory profiler is running the system memory
// reporter can make measurements.
let (system_reporter_sender, system_reporter_receiver) = ipc::channel().unwrap();
ROUTER.add_route(system_reporter_receiver.to_opaque(), box |message| {
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
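// Message loop: block on the port and dispatch messages until the channel closes or handle_msg returns false (on ProfilerMsg::Exit).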
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &String) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::
|
/// The port through which messages are received.
pub port: IpcReceiver<ProfilerMsg>,
/// Registered memory reporters.
|
random_line_split
|
|
mem.rs
|
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
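// Message loop: block on the port and dispatch messages until the channel closes or handle_msg returns false (on ProfilerMsg::Exit).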
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &String) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn
|
(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
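// /proc/self/statm reports sizes in pages, so the page size is needed to convert them to bytes.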
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
|
print
|
identifier_name
|
expr-match-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
// Tests for match as expressions resulting in boxed types
|
fn test_box() {
let res: Box<_> = match true { true => { box 100 }, _ => panic!() };
assert_eq!(*res, 100);
}
pub fn main() { test_box(); }
|
random_line_split
|
|
expr-match-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
// Tests for match as expressions resulting in boxed types
fn test_box()
|
pub fn main() { test_box(); }
|
{
let res: Box<_> = match true { true => { box 100 }, _ => panic!() };
assert_eq!(*res, 100);
}
|
identifier_body
|
expr-match-unique.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
// Tests for match as expressions resulting in boxed types
fn test_box() {
let res: Box<_> = match true { true => { box 100 }, _ => panic!() };
assert_eq!(*res, 100);
}
pub fn
|
() { test_box(); }
|
main
|
identifier_name
|
callbacks.rs
|
// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Private callback support functions.
use libc::{c_double, c_int, c_uint};
use std::mem;
use super::*;
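// callback! stores the boxed user callback in task-local data and registers an extern "C" shim that looks it up and forwards the converted arguments to it.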
macro_rules! callback(
(
type Args = ($($arg:ident: $arg_ty:ty),*);
type Callback = $Callback:ident;
let ext_set = $ext_set:expr;
fn callback($($ext_arg:ident: $ext_arg_ty:ty),*) $call:expr
) => (
local_data_key!(CALLBACK_KEY: Box<Object<Args> + 'static>)
type Args = ($($arg_ty),*,);
|
trait Object<T> {
fn call(&self, args: T);
}
impl<UserData> Object<Args> for ::Callback<fn($($arg_ty),*, &UserData), UserData> {
fn call(&self, ($($arg),*,): Args) {
(self.f)($($arg),*, &self.data);
}
}
pub fn set<UserData: 'static>(f: ::$Callback<UserData>) {
CALLBACK_KEY.replace(Some(box f as Box<Object<Args> + 'static>));
($ext_set)(Some(callback));
}
pub fn unset() {
CALLBACK_KEY.replace(None);
($ext_set)(None);
}
extern "C" fn callback($($ext_arg: $ext_arg_ty),*) {
match CALLBACK_KEY.get() {
Some(cb) => unsafe { cb.call($call) },
_ => {}
}
}
)
)
pub mod error {
use libc::{c_int, c_char};
use std::mem;
use std::string;
callback!(
type Args = (error: ::Error, description: String);
type Callback = ErrorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetErrorCallback(cb) };
fn callback(error: c_int, description: *const c_char) {
(mem::transmute(error), string::raw::from_buf(
mem::transmute(description)))
}
)
}
pub mod monitor {
use libc::{c_int};
use std::mem;
use std::kinds::marker;
callback!(
type Args = (monitor: ::Monitor, event: ::MonitorEvent);
type Callback = MonitorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetMonitorCallback(cb) };
fn callback(monitor: *mut ::ffi::GLFWmonitor, event: c_int) {
let monitor = ::Monitor {
ptr: monitor,
no_copy: marker::NoCopy,
no_send: marker::NoSend,
no_share: marker::NoSync,
};
(monitor, mem::transmute(event))
}
)
}
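// Recover a typed reference to the event Sender that was stashed in the GLFW window's user pointer.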
unsafe fn get_sender<'a>(window: &'a *mut ffi::GLFWwindow) -> &'a Sender<(f64, WindowEvent)> {
mem::transmute(ffi::glfwGetWindowUserPointer(*window))
}
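// window_callback! expands to an extern "C" shim that forwards the event, timestamped with glfwGetTime(), to the channel behind the window's user pointer.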
macro_rules! window_callback(
(fn $name:ident () => $event:ident) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event)); }
}
);
(fn $name:ident ($($ext_arg:ident: $ext_arg_ty:ty),*) => $event:ident($($arg_conv:expr),*)) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow $(, $ext_arg: $ext_arg_ty)*) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event($($arg_conv),*))); }
}
);
)
window_callback!(fn window_pos_callback(xpos: c_int, ypos: c_int) => PosEvent(xpos as i32, ypos as i32))
window_callback!(fn window_size_callback(width: c_int, height: c_int) => SizeEvent(width as i32, height as i32))
window_callback!(fn window_close_callback() => CloseEvent)
window_callback!(fn window_refresh_callback() => RefreshEvent)
window_callback!(fn window_focus_callback(focused: c_int) => FocusEvent(focused == ffi::TRUE))
window_callback!(fn window_iconify_callback(iconified: c_int) => IconifyEvent(iconified == ffi::TRUE))
window_callback!(fn framebuffer_size_callback(width: c_int, height: c_int) => FramebufferSizeEvent(width as i32, height as i32))
window_callback!(fn mouse_button_callback(button: c_int, action: c_int, mods: c_int) => MouseButtonEvent(mem::transmute(button), mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn cursor_pos_callback(xpos: c_double, ypos: c_double) => CursorPosEvent(xpos as f64, ypos as f64))
window_callback!(fn cursor_enter_callback(entered: c_int) => CursorEnterEvent(entered == ffi::TRUE))
window_callback!(fn scroll_callback(xpos: c_double, ypos: c_double) => ScrollEvent(xpos as f64, ypos as f64))
window_callback!(fn key_callback(key: c_int, scancode: c_int, action: c_int, mods: c_int) => KeyEvent(mem::transmute(key), scancode, mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn char_callback(character: c_uint) => CharEvent(::std::char::from_u32(character).unwrap()))
|
random_line_split
|
|
callbacks.rs
|
// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Private callback support functions.
use libc::{c_double, c_int, c_uint};
use std::mem;
use super::*;
macro_rules! callback(
(
type Args = ($($arg:ident: $arg_ty:ty),*);
type Callback = $Callback:ident;
let ext_set = $ext_set:expr;
fn callback($($ext_arg:ident: $ext_arg_ty:ty),*) $call:expr
) => (
local_data_key!(CALLBACK_KEY: Box<Object<Args> + 'static>)
type Args = ($($arg_ty),*,);
trait Object<T> {
fn call(&self, args: T);
}
impl<UserData> Object<Args> for ::Callback<fn($($arg_ty),*, &UserData), UserData> {
fn call(&self, ($($arg),*,): Args) {
(self.f)($($arg),*, &self.data);
}
}
pub fn set<UserData: 'static>(f: ::$Callback<UserData>) {
CALLBACK_KEY.replace(Some(box f as Box<Object<Args> + 'static>));
($ext_set)(Some(callback));
}
pub fn unset() {
CALLBACK_KEY.replace(None);
($ext_set)(None);
}
extern "C" fn callback($($ext_arg: $ext_arg_ty),*) {
match CALLBACK_KEY.get() {
Some(cb) => unsafe { cb.call($call) },
_ => {}
}
}
)
)
pub mod error {
use libc::{c_int, c_char};
use std::mem;
use std::string;
callback!(
type Args = (error: ::Error, description: String);
type Callback = ErrorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetErrorCallback(cb) };
fn callback(error: c_int, description: *const c_char) {
(mem::transmute(error), string::raw::from_buf(
mem::transmute(description)))
}
)
}
pub mod monitor {
use libc::{c_int};
use std::mem;
use std::kinds::marker;
callback!(
type Args = (monitor: ::Monitor, event: ::MonitorEvent);
type Callback = MonitorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetMonitorCallback(cb) };
fn callback(monitor: *mut ::ffi::GLFWmonitor, event: c_int) {
let monitor = ::Monitor {
ptr: monitor,
no_copy: marker::NoCopy,
no_send: marker::NoSend,
no_share: marker::NoSync,
};
(monitor, mem::transmute(event))
}
)
}
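// Recover a typed reference to the event Sender that was stashed in the GLFW window's user pointer.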
unsafe fn
|
<'a>(window: &'a *mut ffi::GLFWwindow) -> &'a Sender<(f64, WindowEvent)> {
mem::transmute(ffi::glfwGetWindowUserPointer(*window))
}
macro_rules! window_callback(
(fn $name:ident () => $event:ident) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event)); }
}
);
(fn $name:ident ($($ext_arg:ident: $ext_arg_ty:ty),*) => $event:ident($($arg_conv:expr),*)) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow $(, $ext_arg: $ext_arg_ty)*) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event($($arg_conv),*))); }
}
);
)
window_callback!(fn window_pos_callback(xpos: c_int, ypos: c_int) => PosEvent(xpos as i32, ypos as i32))
window_callback!(fn window_size_callback(width: c_int, height: c_int) => SizeEvent(width as i32, height as i32))
window_callback!(fn window_close_callback() => CloseEvent)
window_callback!(fn window_refresh_callback() => RefreshEvent)
window_callback!(fn window_focus_callback(focused: c_int) => FocusEvent(focused == ffi::TRUE))
window_callback!(fn window_iconify_callback(iconified: c_int) => IconifyEvent(iconified == ffi::TRUE))
window_callback!(fn framebuffer_size_callback(width: c_int, height: c_int) => FramebufferSizeEvent(width as i32, height as i32))
window_callback!(fn mouse_button_callback(button: c_int, action: c_int, mods: c_int) => MouseButtonEvent(mem::transmute(button), mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn cursor_pos_callback(xpos: c_double, ypos: c_double) => CursorPosEvent(xpos as f64, ypos as f64))
window_callback!(fn cursor_enter_callback(entered: c_int) => CursorEnterEvent(entered == ffi::TRUE))
window_callback!(fn scroll_callback(xpos: c_double, ypos: c_double) => ScrollEvent(xpos as f64, ypos as f64))
window_callback!(fn key_callback(key: c_int, scancode: c_int, action: c_int, mods: c_int) => KeyEvent(mem::transmute(key), scancode, mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn char_callback(character: c_uint) => CharEvent(::std::char::from_u32(character).unwrap()))
|
get_sender
|
identifier_name
|
callbacks.rs
|
// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Private callback support functions.
use libc::{c_double, c_int, c_uint};
use std::mem;
use super::*;
macro_rules! callback(
(
type Args = ($($arg:ident: $arg_ty:ty),*);
type Callback = $Callback:ident;
let ext_set = $ext_set:expr;
fn callback($($ext_arg:ident: $ext_arg_ty:ty),*) $call:expr
) => (
local_data_key!(CALLBACK_KEY: Box<Object<Args> + 'static>)
type Args = ($($arg_ty),*,);
trait Object<T> {
fn call(&self, args: T);
}
impl<UserData> Object<Args> for ::Callback<fn($($arg_ty),*, &UserData), UserData> {
fn call(&self, ($($arg),*,): Args) {
(self.f)($($arg),*, &self.data);
}
}
pub fn set<UserData: 'static>(f: ::$Callback<UserData>) {
CALLBACK_KEY.replace(Some(box f as Box<Object<Args> + 'static>));
($ext_set)(Some(callback));
}
pub fn unset() {
CALLBACK_KEY.replace(None);
($ext_set)(None);
}
extern "C" fn callback($($ext_arg: $ext_arg_ty),*) {
match CALLBACK_KEY.get() {
Some(cb) => unsafe { cb.call($call) },
_ => {}
}
}
)
)
pub mod error {
use libc::{c_int, c_char};
use std::mem;
use std::string;
callback!(
type Args = (error: ::Error, description: String);
type Callback = ErrorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetErrorCallback(cb) };
fn callback(error: c_int, description: *const c_char) {
(mem::transmute(error), string::raw::from_buf(
mem::transmute(description)))
}
)
}
pub mod monitor {
use libc::{c_int};
use std::mem;
use std::kinds::marker;
callback!(
type Args = (monitor: ::Monitor, event: ::MonitorEvent);
type Callback = MonitorCallback;
let ext_set = |cb| unsafe { ::ffi::glfwSetMonitorCallback(cb) };
fn callback(monitor: *mut ::ffi::GLFWmonitor, event: c_int) {
let monitor = ::Monitor {
ptr: monitor,
no_copy: marker::NoCopy,
no_send: marker::NoSend,
no_share: marker::NoSync,
};
(monitor, mem::transmute(event))
}
)
}
unsafe fn get_sender<'a>(window: &'a *mut ffi::GLFWwindow) -> &'a Sender<(f64, WindowEvent)>
|
macro_rules! window_callback(
(fn $name:ident () => $event:ident) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event)); }
}
);
(fn $name:ident ($($ext_arg:ident: $ext_arg_ty:ty),*) => $event:ident($($arg_conv:expr),*)) => (
pub extern "C" fn $name(window: *mut ffi::GLFWwindow $(, $ext_arg: $ext_arg_ty)*) {
unsafe { get_sender(&window).send((ffi::glfwGetTime() as f64, $event($($arg_conv),*))); }
}
);
)
window_callback!(fn window_pos_callback(xpos: c_int, ypos: c_int) => PosEvent(xpos as i32, ypos as i32))
window_callback!(fn window_size_callback(width: c_int, height: c_int) => SizeEvent(width as i32, height as i32))
window_callback!(fn window_close_callback() => CloseEvent)
window_callback!(fn window_refresh_callback() => RefreshEvent)
window_callback!(fn window_focus_callback(focused: c_int) => FocusEvent(focused == ffi::TRUE))
window_callback!(fn window_iconify_callback(iconified: c_int) => IconifyEvent(iconified == ffi::TRUE))
window_callback!(fn framebuffer_size_callback(width: c_int, height: c_int) => FramebufferSizeEvent(width as i32, height as i32))
window_callback!(fn mouse_button_callback(button: c_int, action: c_int, mods: c_int) => MouseButtonEvent(mem::transmute(button), mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn cursor_pos_callback(xpos: c_double, ypos: c_double) => CursorPosEvent(xpos as f64, ypos as f64))
window_callback!(fn cursor_enter_callback(entered: c_int) => CursorEnterEvent(entered == ffi::TRUE))
window_callback!(fn scroll_callback(xpos: c_double, ypos: c_double) => ScrollEvent(xpos as f64, ypos as f64))
window_callback!(fn key_callback(key: c_int, scancode: c_int, action: c_int, mods: c_int) => KeyEvent(mem::transmute(key), scancode, mem::transmute(action), Modifiers::from_bits(mods).unwrap()))
window_callback!(fn char_callback(character: c_uint) => CharEvent(::std::char::from_u32(character).unwrap()))
|
{
mem::transmute(ffi::glfwGetWindowUserPointer(*window))
}
|
identifier_body
|
uv.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Rust bindings to libuv
*
* This is the base-module for various levels of bindings to
* the libuv library.
*
|
* This base module currently contains a historical, rust-based
* implementation of a few libuv operations that hews closely to
* the patterns of the libuv C-API. It was used, mostly, to explore
* some implementation details and will most likely be deprecated
* in the near future.
*
* The `ll` module contains low-level mappings for working directly
* with the libuv C-API.
*
* The `hl` module contains a set of tools library developers can
* use for interacting with an active libuv loop. This modules's
* API is meant to be used to write high-level,
* rust-idiomatic abstractions for utilizing libuv's asynchronous IO
* facilities.
*/
pub use ll = super::uv_ll;
pub use iotask = uv_iotask;
pub use global_loop = uv_global_loop;
|
* These modules are seeing heavy work, currently, and the final
* API layout should not be inferred from its current form.
*
|
random_line_split
|
test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{DEFAULT_BUILD_DIR, DEFAULT_PACKAGE_DIR, DEFAULT_SOURCE_DIR, DEFAULT_STORAGE_DIR};
use anyhow::anyhow;
use move_coverage::coverage_map::{CoverageMap, ExecCoverageMapWithModules};
use move_lang::{
command_line::{read_bool_env_var, COLOR_MODE_ENV_VAR},
extension_equals, path_to_string, MOVE_COMPILED_EXTENSION,
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
env,
fs::{self, File},
io::{self, BufRead, Write},
path::{Path, PathBuf},
process::Command,
};
use vm::file_format::CompiledModule;
/// Basic datatest testing framework for the CLI. The `run_one` entrypoint expects
/// an `args.txt` file with arguments that the `move` binary understands (one set
/// of arguments per line). The testing framework runs the commands, compares the
/// result to the expected output, and runs `move clean` to discard resources,
/// modules, and event data created by running the test.
const EXP_EXT: &str = "exp";
/// If this env var is set, `move clean` will not be run after each test.
/// this is useful if you want to look at the `storage` or `move_events`
/// produced by a test. However, you'll have to manually run `move clean`
/// before re-running the test.
const NO_MOVE_CLEAN: &str = "NO_MOVE_CLEAN";
/// If either of these env vars is set, the test harness overwrites the
/// old .exp files with the output instead of checking them against the
/// output.
const UPDATE_BASELINE: &str = "UPDATE_BASELINE";
const UB: &str = "UB";
/// The filename that contains the arguments to the Move binary.
pub const TEST_ARGS_FILENAME: &str = "args.txt";
/// Name of the environment variable we need to set in order to get tracing
/// enabled in the move VM.
const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE";
/// The default file name (inside the build output dir) for the runtime to
/// dump the execution trace to. The trace will be used by the coverage tool
/// if --track-cov is set. If --track-cov is not set, then no trace file will
/// be produced.
const DEFAULT_TRACE_FILE: &str = "trace";
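/// Render a line-based diff of expected vs. actual output, coloring additions green and removals red via ANSI escapes.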
fn format_diff(expected: String, actual: String) -> String {
use difference::*;
let changeset = Changeset::new(&expected, &actual, "\n");
let mut ret = String::new();
for seq in changeset.diffs {
match &seq {
Difference::Same(x) => {
ret.push_str(x);
ret.push('\n');
}
Difference::Add(x) => {
ret.push_str("\x1B[92m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
Difference::Rem(x) => {
ret.push_str("\x1B[91m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
}
}
ret
}
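/// Build per-module coverage from the VM trace file, restricted to modules published in storage (modules compiled for packages are filtered out).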
fn collect_coverage(
trace_file: &Path,
build_dir: &Path,
storage_dir: &Path,
) -> anyhow::Result<ExecCoverageMapWithModules> {
fn find_compiled_move_filenames(path: &Path) -> anyhow::Result<Vec<String>> {
if path.exists() {
move_lang::find_filenames(&[path_to_string(path)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
})
} else {
Ok(vec![])
}
}
// collect modules compiled for packages (to be filtered out)
let pkg_modules: HashSet<_> =
find_compiled_move_filenames(&build_dir.join(DEFAULT_PACKAGE_DIR))?
.into_iter()
.map(|entry| PathBuf::from(entry).file_name().unwrap().to_owned())
.collect();
// collect modules published minus modules compiled for packages
let src_module_files = move_lang::find_filenames(&[path_to_string(storage_dir)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
&& !pkg_modules.contains(fpath.file_name().unwrap())
})?;
let src_modules = src_module_files
.iter()
.map(|entry| {
let bytecode_bytes = fs::read(entry)?;
let compiled_module = CompiledModule::deserialize(&bytecode_bytes)
.map_err(|e| anyhow!("Failure deserializing module {:?}: {:?}", entry, e))?;
// use absolute path to the compiled module file
let module_absolute_path = path_to_string(&PathBuf::from(entry).canonicalize()?)?;
Ok((module_absolute_path, compiled_module))
})
.collect::<anyhow::Result<HashMap<_, _>>>()?;
// build the filter
let mut filter = BTreeMap::new();
for (entry, module) in src_modules.into_iter() {
let module_id = module.self_id();
filter
.entry(*module_id.address())
.or_insert_with(BTreeMap::new)
.insert(module_id.name().to_owned(), (entry, module));
}
// collect filtered trace
let coverage_map = CoverageMap::from_trace_file(trace_file)
.to_unified_exec_map()
.into_coverage_map_with_modules(filter);
Ok(coverage_map)
}
/// Run the `args_path` batch file with `cli_binary`
pub fn run_one(
args_path: &Path,
cli_binary: &str,
track_cov: bool,
) -> anyhow::Result<Option<ExecCoverageMapWithModules>> {
let args_file = io::BufReader::new(File::open(args_path)?).lines();
// path where we will run the binary
let exe_dir = args_path.parent().unwrap();
let cli_binary_path = Path::new(cli_binary).canonicalize()?;
let storage_dir = Path::new(exe_dir).join(DEFAULT_STORAGE_DIR);
let build_output = Path::new(exe_dir).join(DEFAULT_BUILD_DIR);
if storage_dir.exists() || build_output.exists() {
// need to clean before testing
Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.arg("clean")
.output()?;
}
let mut output = "".to_string();
// for tracing file path: always use the absolute path so we do not need to worry about where
// the VM is executed.
let trace_file = env::current_dir()?
.join(&build_output)
.join(DEFAULT_TRACE_FILE);
// Disable colors in error reporting from the Move compiler
env::set_var(COLOR_MODE_ENV_VAR, "NONE");
for args_line in args_file {
let args_line = args_line?;
if args_line.starts_with('#') {
// allow comments in args.txt
continue;
}
let args_iter: Vec<&str> = args_line.split_whitespace().collect();
if args_iter.is_empty() {
// allow blank lines in args.txt
continue;
}
// enable tracing in the VM by setting the env var.
if track_cov {
env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, trace_file.as_os_str());
} else if env::var_os(MOVE_VM_TRACING_ENV_VAR_NAME).is_some() {
// this check prevents cascading the coverage tracking flag.
// in particular, if
// 1. we run with move-cli test <path-to-args-A.txt> --track-cov, and
// 2. in this <args-A.txt>, there is another command: test <args-B.txt>
// then, when running <args-B.txt>, coverage will not be tracked nor printed
env::remove_var(MOVE_VM_TRACING_ENV_VAR_NAME);
}
let cmd_output = Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.args(args_iter)
.output()?;
output += &format!("Command `{}`:\n", args_line);
output += std::str::from_utf8(&cmd_output.stdout)?;
output += std::str::from_utf8(&cmd_output.stderr)?;
}
// collect coverage information
let cov_info = if track_cov {
if !trace_file.exists() {
eprintln!(
"Trace file {:?} not found: coverage is only available with at least one `run` \
command in the args.txt (after a `clean`, if there is one)",
trace_file
);
None
} else {
Some(collect_coverage(&trace_file, &build_output, &storage_dir)?)
}
} else {
None
};
// post-test cleanup and cleanup checks
// check that the test command didn't create a src dir
let run_move_clean = !read_bool_env_var(NO_MOVE_CLEAN);
if run_move_clean {
// run `move clean` to ensure that temporary state is cleaned up
Command::new(cli_binary_path)
.current_dir(exe_dir)
.arg("clean")
.output()?;
// check that storage was deleted
assert!(
!storage_dir.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_STORAGE_DIR
);
assert!(
!build_output.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_BUILD_DIR
);
}
let update_baseline = read_bool_env_var(UPDATE_BASELINE) || read_bool_env_var(UB);
let exp_path = args_path.with_extension(EXP_EXT);
if update_baseline {
fs::write(exp_path, &output)?;
return Ok(cov_info);
}
// compare output and exp_file
let expected_output = fs::read_to_string(exp_path).unwrap_or_else(|_| "".to_string());
if expected_output != output {
anyhow::bail!(
"Expected output differs from actual output:\n{}",
format_diff(expected_output, output)
)
} else {
Ok(cov_info)
}
}
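/// Discover every args.txt under `args_path`, run each test, aggregate pass/fail counts and coverage, and bail if any test failed.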
pub fn run_all(args_path: &str, cli_binary: &str, track_cov: bool) -> anyhow::Result<()> {
let mut test_total: u64 = 0;
let mut test_passed: u64 = 0;
let mut cov_info = ExecCoverageMapWithModules::empty();
// find `args.txt` and iterate over them
for entry in move_lang::find_filenames(&[args_path.to_owned()], |fpath| {
fpath.file_name().expect("unexpected file entry path") == TEST_ARGS_FILENAME
})? {
match run_one(Path::new(&entry), cli_binary, track_cov) {
Ok(cov_opt) => {
test_passed = test_passed.checked_add(1).unwrap();
if let Some(cov) = cov_opt {
cov_info.merge(cov);
}
}
Err(ex) => eprintln!("Test {} failed with error: {}", entry, ex),
}
test_total = test_total.checked_add(1).unwrap();
}
println!("{} / {} test(s) passed.", test_passed, test_total);
// if any test fails, bail
let test_failed = test_total.checked_sub(test_passed).unwrap();
if test_failed != 0 {
anyhow::bail!("{} / {} test(s) failed.", test_failed, test_total)
}
// show coverage information if requested
if track_cov {
let mut summary_writer: Box<dyn Write> = Box::new(io::stdout());
for (_, module_summary) in cov_info.into_module_summaries() {
module_summary.summarize_human(&mut summary_writer, true)?;
}
}
Ok(())
}
/// Create a directory scaffold for writing a Move CLI test.
pub fn
|
(path: &str) -> anyhow::Result<()> {
let path = Path::new(path);
if path.exists() {
anyhow::bail!("{:#?} already exists. Remove {:#?} and re-run this command if creating it as a test directory was intentional.", path, path);
}
let format_src_dir = |dir| format!("{}/{}", DEFAULT_SOURCE_DIR, dir);
let dirs = ["modules", "scripts"];
let files = [(
TEST_ARGS_FILENAME,
Some("# This is a batch file. To write an expected value test that runs `move <command1> <args1>;move <command2> <args2>`, write\n\
# `<command1> <args1>`\n\
# `<command2> <args2>`\n\
# '#' is a comment.",
),
)];
fs::create_dir_all(&path)?;
for dir in &dirs {
fs::create_dir_all(&path.canonicalize()?.join(format_src_dir(dir)))?;
}
for (file, possible_contents) in &files {
let mut file_handle = fs::File::create(path.canonicalize()?.join(file))?;
if let Some(contents) = possible_contents {
write!(file_handle, "{}", contents)?;
}
}
Ok(())
}
|
create_test_scaffold
|
identifier_name
|
test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{DEFAULT_BUILD_DIR, DEFAULT_PACKAGE_DIR, DEFAULT_SOURCE_DIR, DEFAULT_STORAGE_DIR};
use anyhow::anyhow;
use move_coverage::coverage_map::{CoverageMap, ExecCoverageMapWithModules};
use move_lang::{
command_line::{read_bool_env_var, COLOR_MODE_ENV_VAR},
extension_equals, path_to_string, MOVE_COMPILED_EXTENSION,
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
env,
fs::{self, File},
io::{self, BufRead, Write},
path::{Path, PathBuf},
process::Command,
};
use vm::file_format::CompiledModule;
/// Basic datatest testing framework for the CLI. The `run_one` entrypoint expects
/// an `args.txt` file with arguments that the `move` binary understands (one set
/// of arguments per line). The testing framework runs the commands, compares the
/// result to the expected output, and runs `move clean` to discard resources,
/// modules, and event data created by running the test.
const EXP_EXT: &str = "exp";
/// If this env var is set, `move clean` will not be run after each test.
/// this is useful if you want to look at the `storage` or `move_events`
/// produced by a test. However, you'll have to manually run `move clean`
/// before re-running the test.
const NO_MOVE_CLEAN: &str = "NO_MOVE_CLEAN";
/// If either of these env vars is set, the test harness overwrites the
/// old .exp files with the output instead of checking them against the
/// output.
const UPDATE_BASELINE: &str = "UPDATE_BASELINE";
const UB: &str = "UB";
/// The filename that contains the arguments to the Move binary.
pub const TEST_ARGS_FILENAME: &str = "args.txt";
/// Name of the environment variable we need to set in order to get tracing
/// enabled in the move VM.
const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE";
/// The default file name (inside the build output dir) for the runtime to
/// dump the execution trace to. The trace will be used by the coverage tool
/// if --track-cov is set. If --track-cov is not set, then no trace file will
/// be produced.
const DEFAULT_TRACE_FILE: &str = "trace";
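/// Render a line-based diff of expected vs. actual output, coloring additions green and removals red via ANSI escapes.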
fn format_diff(expected: String, actual: String) -> String {
use difference::*;
let changeset = Changeset::new(&expected, &actual, "\n");
let mut ret = String::new();
for seq in changeset.diffs {
match &seq {
Difference::Same(x) => {
ret.push_str(x);
ret.push('\n');
}
Difference::Add(x) => {
ret.push_str("\x1B[92m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
Difference::Rem(x) => {
ret.push_str("\x1B[91m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
}
}
ret
}
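/// Build per-module coverage from the VM trace file, restricted to modules published in storage (modules compiled for packages are filtered out).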
fn collect_coverage(
trace_file: &Path,
build_dir: &Path,
storage_dir: &Path,
) -> anyhow::Result<ExecCoverageMapWithModules> {
fn find_compiled_move_filenames(path: &Path) -> anyhow::Result<Vec<String>> {
if path.exists() {
move_lang::find_filenames(&[path_to_string(path)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
})
} else {
Ok(vec![])
}
}
// collect modules compiled for packages (to be filtered out)
let pkg_modules: HashSet<_> =
find_compiled_move_filenames(&build_dir.join(DEFAULT_PACKAGE_DIR))?
.into_iter()
.map(|entry| PathBuf::from(entry).file_name().unwrap().to_owned())
.collect();
// collect modules published minus modules compiled for packages
let src_module_files = move_lang::find_filenames(&[path_to_string(storage_dir)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
&& !pkg_modules.contains(fpath.file_name().unwrap())
})?;
let src_modules = src_module_files
.iter()
.map(|entry| {
let bytecode_bytes = fs::read(entry)?;
let compiled_module = CompiledModule::deserialize(&bytecode_bytes)
.map_err(|e| anyhow!("Failure deserializing module {:?}: {:?}", entry, e))?;
// use absolute path to the compiled module file
let module_absolute_path = path_to_string(&PathBuf::from(entry).canonicalize()?)?;
Ok((module_absolute_path, compiled_module))
})
.collect::<anyhow::Result<HashMap<_, _>>>()?;
// build the filter
let mut filter = BTreeMap::new();
for (entry, module) in src_modules.into_iter() {
let module_id = module.self_id();
filter
.entry(*module_id.address())
.or_insert_with(BTreeMap::new)
.insert(module_id.name().to_owned(), (entry, module));
}
// collect filtered trace
let coverage_map = CoverageMap::from_trace_file(trace_file)
.to_unified_exec_map()
.into_coverage_map_with_modules(filter);
Ok(coverage_map)
}
/// Run the `args_path` batch file with `cli_binary`
pub fn run_one(
args_path: &Path,
cli_binary: &str,
track_cov: bool,
) -> anyhow::Result<Option<ExecCoverageMapWithModules>> {
let args_file = io::BufReader::new(File::open(args_path)?).lines();
// path where we will run the binary
let exe_dir = args_path.parent().unwrap();
let cli_binary_path = Path::new(cli_binary).canonicalize()?;
let storage_dir = Path::new(exe_dir).join(DEFAULT_STORAGE_DIR);
let build_output = Path::new(exe_dir).join(DEFAULT_BUILD_DIR);
if storage_dir.exists() || build_output.exists() {
// need to clean before testing
Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.arg("clean")
.output()?;
|
}
let mut output = "".to_string();
// for tracing file path: always use the absolute path so we do not need to worry about where
// the VM is executed.
let trace_file = env::current_dir()?
.join(&build_output)
.join(DEFAULT_TRACE_FILE);
// Disable colors in error reporting from the Move compiler
env::set_var(COLOR_MODE_ENV_VAR, "NONE");
for args_line in args_file {
let args_line = args_line?;
if args_line.starts_with('#') {
// allow comments in args.txt
continue;
}
let args_iter: Vec<&str> = args_line.split_whitespace().collect();
if args_iter.is_empty() {
// allow blank lines in args.txt
continue;
}
// enable tracing in the VM by setting the env var.
if track_cov {
env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, trace_file.as_os_str());
} else if env::var_os(MOVE_VM_TRACING_ENV_VAR_NAME).is_some() {
// this check prevents cascading the coverage tracking flag.
// in particular, if
// 1. we run with move-cli test <path-to-args-A.txt> --track-cov, and
// 2. in this <args-A.txt>, there is another command: test <args-B.txt>
// then, when running <args-B.txt>, coverage will not be tracked nor printed
env::remove_var(MOVE_VM_TRACING_ENV_VAR_NAME);
}
let cmd_output = Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.args(args_iter)
.output()?;
output += &format!("Command `{}`:\n", args_line);
output += std::str::from_utf8(&cmd_output.stdout)?;
output += std::str::from_utf8(&cmd_output.stderr)?;
}
// collect coverage information
let cov_info = if track_cov {
if !trace_file.exists() {
eprintln!(
"Trace file {:?} not found: coverage is only available with at least one `run` \
command in the args.txt (after a `clean`, if there is one)",
trace_file
);
None
} else {
Some(collect_coverage(&trace_file, &build_output, &storage_dir)?)
}
} else {
None
};
// post-test cleanup and cleanup checks
// check that the test command didn't create a src dir
let run_move_clean = !read_bool_env_var(NO_MOVE_CLEAN);
if run_move_clean {
// run `move clean` to ensure that temporary state is cleaned up
Command::new(cli_binary_path)
.current_dir(exe_dir)
.arg("clean")
.output()?;
// check that storage was deleted
assert!(
!storage_dir.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_STORAGE_DIR
);
assert!(
!build_output.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_BUILD_DIR
);
}
let update_baseline = read_bool_env_var(UPDATE_BASELINE) || read_bool_env_var(UB);
let exp_path = args_path.with_extension(EXP_EXT);
if update_baseline {
fs::write(exp_path, &output)?;
return Ok(cov_info);
}
// compare output and exp_file
let expected_output = fs::read_to_string(exp_path).unwrap_or_else(|_| "".to_string());
if expected_output != output {
anyhow::bail!(
"Expected output differs from actual output:\n{}",
format_diff(expected_output, output)
)
} else {
Ok(cov_info)
}
}
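/// Run every `args.txt` test found under `args_path`, print a pass/fail summary, and, when
/// `track_cov` is set, print a per-module human-readable coverage summary; bails if any test fails.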
pub fn run_all(args_path: &str, cli_binary: &str, track_cov: bool) -> anyhow::Result<()> {
let mut test_total: u64 = 0;
let mut test_passed: u64 = 0;
let mut cov_info = ExecCoverageMapWithModules::empty();
// find `args.txt` and iterate over them
for entry in move_lang::find_filenames(&[args_path.to_owned()], |fpath| {
fpath.file_name().expect("unexpected file entry path") == TEST_ARGS_FILENAME
})? {
match run_one(Path::new(&entry), cli_binary, track_cov) {
Ok(cov_opt) => {
test_passed = test_passed.checked_add(1).unwrap();
if let Some(cov) = cov_opt {
cov_info.merge(cov);
}
}
Err(ex) => eprintln!("Test {} failed with error: {}", entry, ex),
}
test_total = test_total.checked_add(1).unwrap();
}
println!("{} / {} test(s) passed.", test_passed, test_total);
// if any test fails, bail
let test_failed = test_total.checked_sub(test_passed).unwrap();
if test_failed != 0 {
anyhow::bail!("{} / {} test(s) failed.", test_failed, test_total)
}
// show coverage information if requested
if track_cov {
let mut summary_writer: Box<dyn Write> = Box::new(io::stdout());
for (_, module_summary) in cov_info.into_module_summaries() {
module_summary.summarize_human(&mut summary_writer, true)?;
}
}
Ok(())
}
/// Create a directory scaffold for writing a Move CLI test.
pub fn create_test_scaffold(path: &str) -> anyhow::Result<()> {
let path = Path::new(path);
if path.exists() {
anyhow::bail!("{:#?} already exists. Remove {:#?} and re-run this command if creating it as a test directory was intentional.", path, path);
}
let format_src_dir = |dir| format!("{}/{}", DEFAULT_SOURCE_DIR, dir);
let dirs = ["modules", "scripts"];
let files = [(
TEST_ARGS_FILENAME,
Some("# This is a batch file. To write an expected value test that runs `move <command1> <args1>;move <command2> <args2>`, write\n\
# `<command1> <args1>`\n\
# `<command2> <args2>`\n\
# '#' is a comment.",
),
)];
fs::create_dir_all(&path)?;
for dir in &dirs {
fs::create_dir_all(&path.canonicalize()?.join(format_src_dir(dir)))?;
}
for (file, possible_contents) in &files {
let mut file_handle = fs::File::create(path.canonicalize()?.join(file))?;
if let Some(contents) = possible_contents {
write!(file_handle, "{}", contents)?;
}
}
Ok(())
}
|
random_line_split
|
|
test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{DEFAULT_BUILD_DIR, DEFAULT_PACKAGE_DIR, DEFAULT_SOURCE_DIR, DEFAULT_STORAGE_DIR};
use anyhow::anyhow;
use move_coverage::coverage_map::{CoverageMap, ExecCoverageMapWithModules};
use move_lang::{
command_line::{read_bool_env_var, COLOR_MODE_ENV_VAR},
extension_equals, path_to_string, MOVE_COMPILED_EXTENSION,
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
env,
fs::{self, File},
io::{self, BufRead, Write},
path::{Path, PathBuf},
process::Command,
};
use vm::file_format::CompiledModule;
/// Basic datatest testing framework for the CLI. The `run_one` entrypoint expects
/// an `args.txt` file with arguments that the `move` binary understands (one set
/// of arguments per line). The testing framework runs the commands, compares the
/// result to the expected output, and runs `move clean` to discard resources,
/// modules, and event data created by running the test.
const EXP_EXT: &str = "exp";
/// If this env var is set, `move clean` will not be run after each test.
/// This is useful if you want to look at the `storage` or `move_events`
/// produced by a test. However, you'll have to manually run `move clean`
/// before re-running the test.
const NO_MOVE_CLEAN: &str = "NO_MOVE_CLEAN";
/// If either of these env vars is set, the test harness overwrites the
/// old .exp files with the output instead of checking them against the
/// output.
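/// (For example, running the harness with `UPDATE_BASELINE=1` set in the environment should
/// regenerate the `.exp` baselines, assuming `read_bool_env_var` treats "1" as true.)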
const UPDATE_BASELINE: &str = "UPDATE_BASELINE";
const UB: &str = "UB";
/// The filename that contains the arguments to the Move binary.
pub const TEST_ARGS_FILENAME: &str = "args.txt";
/// Name of the environment variable we need to set in order to get tracing
/// enabled in the move VM.
const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE";
/// The default file name (inside the build output dir) for the runtime to
/// dump the execution trace to. The trace will be used by the coverage tool
/// if --track-cov is set. If --track-cov is not set, then no trace file will
/// be produced.
const DEFAULT_TRACE_FILE: &str = "trace";
fn format_diff(expected: String, actual: String) -> String {
use difference::*;
let changeset = Changeset::new(&expected, &actual, "\n");
let mut ret = String::new();
for seq in changeset.diffs {
match &seq {
Difference::Same(x) => {
ret.push_str(x);
ret.push('\n');
}
Difference::Add(x) => {
ret.push_str("\x1B[92m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
Difference::Rem(x) => {
ret.push_str("\x1B[91m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
}
}
ret
}
fn collect_coverage(
trace_file: &Path,
build_dir: &Path,
storage_dir: &Path,
) -> anyhow::Result<ExecCoverageMapWithModules> {
fn find_compiled_move_filenames(path: &Path) -> anyhow::Result<Vec<String>> {
if path.exists() {
move_lang::find_filenames(&[path_to_string(path)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
})
} else {
Ok(vec![])
}
}
// collect modules compiled for packages (to be filtered out)
let pkg_modules: HashSet<_> =
find_compiled_move_filenames(&build_dir.join(DEFAULT_PACKAGE_DIR))?
.into_iter()
.map(|entry| PathBuf::from(entry).file_name().unwrap().to_owned())
.collect();
// collect modules published minus modules compiled for packages
let src_module_files = move_lang::find_filenames(&[path_to_string(storage_dir)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
&& !pkg_modules.contains(fpath.file_name().unwrap())
})?;
let src_modules = src_module_files
.iter()
.map(|entry| {
let bytecode_bytes = fs::read(entry)?;
let compiled_module = CompiledModule::deserialize(&bytecode_bytes)
.map_err(|e| anyhow!("Failure deserializing module {:?}: {:?}", entry, e))?;
// use absolute path to the compiled module file
let module_absolute_path = path_to_string(&PathBuf::from(entry).canonicalize()?)?;
Ok((module_absolute_path, compiled_module))
})
.collect::<anyhow::Result<HashMap<_, _>>>()?;
// build the filter
let mut filter = BTreeMap::new();
for (entry, module) in src_modules.into_iter() {
let module_id = module.self_id();
filter
.entry(*module_id.address())
.or_insert_with(BTreeMap::new)
.insert(module_id.name().to_owned(), (entry, module));
}
// collect filtered trace
let coverage_map = CoverageMap::from_trace_file(trace_file)
.to_unified_exec_map()
.into_coverage_map_with_modules(filter);
Ok(coverage_map)
}
/// Run the `args_path` batch file with `cli_binary`
pub fn run_one(
args_path: &Path,
cli_binary: &str,
track_cov: bool,
) -> anyhow::Result<Option<ExecCoverageMapWithModules>>
|
.join(DEFAULT_TRACE_FILE);
// Disable colors in error reporting from the Move compiler
env::set_var(COLOR_MODE_ENV_VAR, "NONE");
for args_line in args_file {
let args_line = args_line?;
if args_line.starts_with('#') {
// allow comments in args.txt
continue;
}
let args_iter: Vec<&str> = args_line.split_whitespace().collect();
if args_iter.is_empty() {
// allow blank lines in args.txt
continue;
}
// enable tracing in the VM by setting the env var.
if track_cov {
env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, trace_file.as_os_str());
} else if env::var_os(MOVE_VM_TRACING_ENV_VAR_NAME).is_some() {
// this check prevents cascading the coverage tracking flag.
// in particular, if
// 1. we run with move-cli test <path-to-args-A.txt> --track-cov, and
// 2. in this <args-A.txt>, there is another command: test <args-B.txt>
// then, when running <args-B.txt>, coverage will not be tracked nor printed
env::remove_var(MOVE_VM_TRACING_ENV_VAR_NAME);
}
let cmd_output = Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.args(args_iter)
.output()?;
output += &format!("Command `{}`:\n", args_line);
output += std::str::from_utf8(&cmd_output.stdout)?;
output += std::str::from_utf8(&cmd_output.stderr)?;
}
// collect coverage information
let cov_info = if track_cov {
if !trace_file.exists() {
eprintln!(
"Trace file {:?} not found: coverage is only available with at least one `run` \
command in the args.txt (after a `clean`, if there is one)",
trace_file
);
None
} else {
Some(collect_coverage(&trace_file, &build_output, &storage_dir)?)
}
} else {
None
};
// post-test cleanup and cleanup checks
// check that the test command didn't create a src dir
let run_move_clean = !read_bool_env_var(NO_MOVE_CLEAN);
if run_move_clean {
// run `move clean` to ensure that temporary state is cleaned up
Command::new(cli_binary_path)
.current_dir(exe_dir)
.arg("clean")
.output()?;
// check that storage was deleted
assert!(
!storage_dir.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_STORAGE_DIR
);
assert!(
!build_output.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_BUILD_DIR
);
}
let update_baseline = read_bool_env_var(UPDATE_BASELINE) || read_bool_env_var(UB);
let exp_path = args_path.with_extension(EXP_EXT);
if update_baseline {
fs::write(exp_path, &output)?;
return Ok(cov_info);
}
// compare output and exp_file
let expected_output = fs::read_to_string(exp_path).unwrap_or_else(|_| "".to_string());
if expected_output != output {
anyhow::bail!(
"Expected output differs from actual output:\n{}",
format_diff(expected_output, output)
)
} else {
Ok(cov_info)
}
}
pub fn run_all(args_path: &str, cli_binary: &str, track_cov: bool) -> anyhow::Result<()> {
let mut test_total: u64 = 0;
let mut test_passed: u64 = 0;
let mut cov_info = ExecCoverageMapWithModules::empty();
// find `args.txt` and iterate over them
for entry in move_lang::find_filenames(&[args_path.to_owned()], |fpath| {
fpath.file_name().expect("unexpected file entry path") == TEST_ARGS_FILENAME
})? {
match run_one(Path::new(&entry), cli_binary, track_cov) {
Ok(cov_opt) => {
test_passed = test_passed.checked_add(1).unwrap();
if let Some(cov) = cov_opt {
cov_info.merge(cov);
}
}
Err(ex) => eprintln!("Test {} failed with error: {}", entry, ex),
}
test_total = test_total.checked_add(1).unwrap();
}
println!("{} / {} test(s) passed.", test_passed, test_total);
// if any test fails, bail
let test_failed = test_total.checked_sub(test_passed).unwrap();
if test_failed != 0 {
anyhow::bail!("{} / {} test(s) failed.", test_failed, test_total)
}
// show coverage information if requested
if track_cov {
let mut summary_writer: Box<dyn Write> = Box::new(io::stdout());
for (_, module_summary) in cov_info.into_module_summaries() {
module_summary.summarize_human(&mut summary_writer, true)?;
}
}
Ok(())
}
/// Create a directory scaffold for writing a Move CLI test.
pub fn create_test_scaffold(path: &str) -> anyhow::Result<()> {
let path = Path::new(path);
if path.exists() {
anyhow::bail!("{:#?} already exists. Remove {:#?} and re-run this command if creating it as a test directory was intentional.", path, path);
}
let format_src_dir = |dir| format!("{}/{}", DEFAULT_SOURCE_DIR, dir);
let dirs = ["modules", "scripts"];
let files = [(
TEST_ARGS_FILENAME,
Some("# This is a batch file. To write an expected value test that runs `move <command1> <args1>;move <command2> <args2>`, write\n\
# `<command1> <args1>`\n\
# `<command2> <args2>`\n\
# '#' is a comment.",
),
)];
fs::create_dir_all(&path)?;
for dir in &dirs {
fs::create_dir_all(&path.canonicalize()?.join(format_src_dir(dir)))?;
}
for (file, possible_contents) in &files {
let mut file_handle = fs::File::create(path.canonicalize()?.join(file))?;
if let Some(contents) = possible_contents {
write!(file_handle, "{}", contents)?;
}
}
Ok(())
}
|
{
let args_file = io::BufReader::new(File::open(args_path)?).lines();
// path where we will run the binary
let exe_dir = args_path.parent().unwrap();
let cli_binary_path = Path::new(cli_binary).canonicalize()?;
let storage_dir = Path::new(exe_dir).join(DEFAULT_STORAGE_DIR);
let build_output = Path::new(exe_dir).join(DEFAULT_BUILD_DIR);
if storage_dir.exists() || build_output.exists() {
// need to clean before testing
Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.arg("clean")
.output()?;
}
let mut output = "".to_string();
// for tracing file path: always use the absolute path so we do not need to worry about where
// the VM is executed.
let trace_file = env::current_dir()?
.join(&build_output)
|
identifier_body
|
test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{DEFAULT_BUILD_DIR, DEFAULT_PACKAGE_DIR, DEFAULT_SOURCE_DIR, DEFAULT_STORAGE_DIR};
use anyhow::anyhow;
use move_coverage::coverage_map::{CoverageMap, ExecCoverageMapWithModules};
use move_lang::{
command_line::{read_bool_env_var, COLOR_MODE_ENV_VAR},
extension_equals, path_to_string, MOVE_COMPILED_EXTENSION,
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
env,
fs::{self, File},
io::{self, BufRead, Write},
path::{Path, PathBuf},
process::Command,
};
use vm::file_format::CompiledModule;
/// Basic datatest testing framework for the CLI. The `run_one` entrypoint expects
/// an `args.txt` file with arguments that the `move` binary understands (one set
/// of arguments per line). The testing framework runs the commands, compares the
/// result to the expected output, and runs `move clean` to discard resources,
/// modules, and event data created by running the test.
const EXP_EXT: &str = "exp";
/// If this env var is set, `move clean` will not be run after each test.
/// this is useful if you want to look at the `storage` or `move_events`
/// produced by a test. However, you'll have to manually run `move clean`
/// before re-running the test.
const NO_MOVE_CLEAN: &str = "NO_MOVE_CLEAN";
/// If either of these env vars is set, the test harness overwrites the
/// old.exp files with the output instead of checking them against the
/// output.
const UPDATE_BASELINE: &str = "UPDATE_BASELINE";
const UB: &str = "UB";
/// The filename that contains the arguments to the Move binary.
pub const TEST_ARGS_FILENAME: &str = "args.txt";
/// Name of the environment variable we need to set in order to get tracing
/// enabled in the move VM.
const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE";
/// The default file name (inside the build output dir) for the runtime to
/// dump the execution trace to. The trace will be used by the coverage tool
/// if --track-cov is set. If --track-cov is not set, then no trace file will
/// be produced.
const DEFAULT_TRACE_FILE: &str = "trace";
fn format_diff(expected: String, actual: String) -> String {
use difference::*;
let changeset = Changeset::new(&expected, &actual, "\n");
let mut ret = String::new();
for seq in changeset.diffs {
match &seq {
Difference::Same(x) => {
ret.push_str(x);
ret.push('\n');
}
Difference::Add(x) => {
ret.push_str("\x1B[92m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
Difference::Rem(x) =>
|
}
}
ret
}
fn collect_coverage(
trace_file: &Path,
build_dir: &Path,
storage_dir: &Path,
) -> anyhow::Result<ExecCoverageMapWithModules> {
fn find_compiled_move_filenames(path: &Path) -> anyhow::Result<Vec<String>> {
if path.exists() {
move_lang::find_filenames(&[path_to_string(path)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
})
} else {
Ok(vec![])
}
}
// collect modules compiled for packages (to be filtered out)
let pkg_modules: HashSet<_> =
find_compiled_move_filenames(&build_dir.join(DEFAULT_PACKAGE_DIR))?
.into_iter()
.map(|entry| PathBuf::from(entry).file_name().unwrap().to_owned())
.collect();
// collect modules published minus modules compiled for packages
let src_module_files = move_lang::find_filenames(&[path_to_string(storage_dir)?], |fpath| {
extension_equals(fpath, MOVE_COMPILED_EXTENSION)
&& !pkg_modules.contains(fpath.file_name().unwrap())
})?;
let src_modules = src_module_files
.iter()
.map(|entry| {
let bytecode_bytes = fs::read(entry)?;
let compiled_module = CompiledModule::deserialize(&bytecode_bytes)
.map_err(|e| anyhow!("Failure deserializing module {:?}: {:?}", entry, e))?;
// use absolute path to the compiled module file
let module_absolute_path = path_to_string(&PathBuf::from(entry).canonicalize()?)?;
Ok((module_absolute_path, compiled_module))
})
.collect::<anyhow::Result<HashMap<_, _>>>()?;
// build the filter
let mut filter = BTreeMap::new();
for (entry, module) in src_modules.into_iter() {
let module_id = module.self_id();
filter
.entry(*module_id.address())
.or_insert_with(BTreeMap::new)
.insert(module_id.name().to_owned(), (entry, module));
}
// collect filtered trace
let coverage_map = CoverageMap::from_trace_file(trace_file)
.to_unified_exec_map()
.into_coverage_map_with_modules(filter);
Ok(coverage_map)
}
/// Run the `args_path` batch file with `cli_binary`
pub fn run_one(
args_path: &Path,
cli_binary: &str,
track_cov: bool,
) -> anyhow::Result<Option<ExecCoverageMapWithModules>> {
let args_file = io::BufReader::new(File::open(args_path)?).lines();
// path where we will run the binary
let exe_dir = args_path.parent().unwrap();
let cli_binary_path = Path::new(cli_binary).canonicalize()?;
let storage_dir = Path::new(exe_dir).join(DEFAULT_STORAGE_DIR);
let build_output = Path::new(exe_dir).join(DEFAULT_BUILD_DIR);
if storage_dir.exists() || build_output.exists() {
// need to clean before testing
Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.arg("clean")
.output()?;
}
let mut output = "".to_string();
// for tracing file path: always use the absolute path so we do not need to worry about where
// the VM is executed.
let trace_file = env::current_dir()?
.join(&build_output)
.join(DEFAULT_TRACE_FILE);
// Disable colors in error reporting from the Move compiler
env::set_var(COLOR_MODE_ENV_VAR, "NONE");
for args_line in args_file {
let args_line = args_line?;
if args_line.starts_with('#') {
// allow comments in args.txt
continue;
}
let args_iter: Vec<&str> = args_line.split_whitespace().collect();
if args_iter.is_empty() {
// allow blank lines in args.txt
continue;
}
// enable tracing in the VM by setting the env var.
if track_cov {
env::set_var(MOVE_VM_TRACING_ENV_VAR_NAME, trace_file.as_os_str());
} else if env::var_os(MOVE_VM_TRACING_ENV_VAR_NAME).is_some() {
// this check prevents cascading the coverage tracking flag.
// in particular, if
// 1. we run with move-cli test <path-to-args-A.txt> --track-cov, and
// 2. in this <args-A.txt>, there is another command: test <args-B.txt>
// then, when running <args-B.txt>, coverage will not be tracked nor printed
env::remove_var(MOVE_VM_TRACING_ENV_VAR_NAME);
}
let cmd_output = Command::new(cli_binary_path.clone())
.current_dir(exe_dir)
.args(args_iter)
.output()?;
output += &format!("Command `{}`:\n", args_line);
output += std::str::from_utf8(&cmd_output.stdout)?;
output += std::str::from_utf8(&cmd_output.stderr)?;
}
// collect coverage information
let cov_info = if track_cov {
if !trace_file.exists() {
eprintln!(
"Trace file {:?} not found: coverage is only available with at least one `run` \
command in the args.txt (after a `clean`, if there is one)",
trace_file
);
None
} else {
Some(collect_coverage(&trace_file, &build_output, &storage_dir)?)
}
} else {
None
};
// post-test cleanup and cleanup checks
// check that the test command didn't create a src dir
let run_move_clean = !read_bool_env_var(NO_MOVE_CLEAN);
if run_move_clean {
// run `move clean` to ensure that temporary state is cleaned up
Command::new(cli_binary_path)
.current_dir(exe_dir)
.arg("clean")
.output()?;
// check that storage was deleted
assert!(
!storage_dir.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_STORAGE_DIR
);
assert!(
!build_output.exists(),
"`move clean` failed to eliminate {} directory",
DEFAULT_BUILD_DIR
);
}
let update_baseline = read_bool_env_var(UPDATE_BASELINE) || read_bool_env_var(UB);
let exp_path = args_path.with_extension(EXP_EXT);
if update_baseline {
fs::write(exp_path, &output)?;
return Ok(cov_info);
}
// compare output and exp_file
let expected_output = fs::read_to_string(exp_path).unwrap_or_else(|_| "".to_string());
if expected_output != output {
anyhow::bail!(
"Expected output differs from actual output:\n{}",
format_diff(expected_output, output)
)
} else {
Ok(cov_info)
}
}
pub fn run_all(args_path: &str, cli_binary: &str, track_cov: bool) -> anyhow::Result<()> {
let mut test_total: u64 = 0;
let mut test_passed: u64 = 0;
let mut cov_info = ExecCoverageMapWithModules::empty();
// find `args.txt` and iterate over them
for entry in move_lang::find_filenames(&[args_path.to_owned()], |fpath| {
fpath.file_name().expect("unexpected file entry path") == TEST_ARGS_FILENAME
})? {
match run_one(Path::new(&entry), cli_binary, track_cov) {
Ok(cov_opt) => {
test_passed = test_passed.checked_add(1).unwrap();
if let Some(cov) = cov_opt {
cov_info.merge(cov);
}
}
Err(ex) => eprintln!("Test {} failed with error: {}", entry, ex),
}
test_total = test_total.checked_add(1).unwrap();
}
println!("{} / {} test(s) passed.", test_passed, test_total);
// if any test fails, bail
let test_failed = test_total.checked_sub(test_passed).unwrap();
if test_failed != 0 {
anyhow::bail!("{} / {} test(s) failed.", test_failed, test_total)
}
// show coverage information if requested
if track_cov {
let mut summary_writer: Box<dyn Write> = Box::new(io::stdout());
for (_, module_summary) in cov_info.into_module_summaries() {
module_summary.summarize_human(&mut summary_writer, true)?;
}
}
Ok(())
}
/// Create a directory scaffold for writing a Move CLI test.
pub fn create_test_scaffold(path: &str) -> anyhow::Result<()> {
let path = Path::new(path);
if path.exists() {
anyhow::bail!("{:#?} already exists. Remove {:#?} and re-run this command if creating it as a test directory was intentional.", path, path);
}
let format_src_dir = |dir| format!("{}/{}", DEFAULT_SOURCE_DIR, dir);
let dirs = ["modules", "scripts"];
let files = [(
TEST_ARGS_FILENAME,
Some("# This is a batch file. To write an expected value test that runs `move <command1> <args1>;move <command2> <args2>`, write\n\
# `<command1> <args1>`\n\
# `<command2> <args2>`\n\
# '#' is a comment.",
),
)];
fs::create_dir_all(&path)?;
for dir in &dirs {
fs::create_dir_all(&path.canonicalize()?.join(format_src_dir(dir)))?;
}
for (file, possible_contents) in &files {
let mut file_handle = fs::File::create(path.canonicalize()?.join(file))?;
if let Some(contents) = possible_contents {
write!(file_handle, "{}", contents)?;
}
}
Ok(())
}
|
{
ret.push_str("\x1B[91m");
ret.push_str(x);
ret.push_str("\x1B[0m");
ret.push('\n');
}
|
conditional_block
|
error.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
error_chain! {
types {
EditError, EditErrorKind, ResultExt, Result;
}
errors {
IOError {
description("IO Error")
display("IO Error")
}
NoEditor {
|
}
ProcessExitFailure {
description("Process did not exit properly")
display("Process did not exit properly")
}
InstantiateError {
description("Instantation error")
display("Instantation error")
}
}
}
|
description("No editor set")
display("No editor set")
|
random_line_split
|
p041.rs
|
//! [Problem 41](https://projecteuler.net/problem=41) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#[macro_use(problem)] extern crate common;
extern crate iter;
extern crate integer;
extern crate prime;
use iter::Permutations;
use integer::Integer;
use prime::PrimeSet;
// 1 + 2 + ... + 9 = 45 (divisible by 9 => every 9-pandigital number is divisible by 9)
// 1 + 2 + ... + 8 = 36 (divisible by 9 => every 8-pandigital number is divisible by 9)
// A 7-pandigital number may therefore be the largest pandigital prime.
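// (In general, an n-pandigital number has digit sum n(n+1)/2, which is divisible by 3 for
// n = 8 (36) and n = 9 (45) but not for n = 7 (28), so a 7-pandigital prime is not ruled out.)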
fn compute() -> u64 {
let radix = 10;
let ps = PrimeSet::new();
for (perm, _) in Permutations::new(&[7, 6, 5, 4, 3, 2, 1], 7) {
let n = Integer::from_digits(perm.iter().rev().map(|&x| x), radix);
if ps.contains(n)
|
}
unreachable!()
}
fn solve() -> String {
compute().to_string()
}
problem!("7652413", solve);
|
{
return n
}
|
conditional_block
|
p041.rs
|
//! [Problem 41](https://projecteuler.net/problem=41) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#[macro_use(problem)] extern crate common;
extern crate iter;
extern crate integer;
extern crate prime;
use iter::Permutations;
use integer::Integer;
use prime::PrimeSet;
// 1 + 2 + ... + 9 = 45 (divisible by 9 => every 9-pandigital number is divisible by 9)
// 1 + 2 + ... + 8 = 36 (divisible by 9 => every 8-pandigital number is divisible by 9)
// A 7-pandigital number may therefore be the largest pandigital prime.
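// Note: returning the first prime found below assumes the permutations of [7, 6, 5, 4, 3, 2, 1]
// are generated largest-value-first, so the first hit is the maximum (an assumption about the
// iteration order of `Permutations`).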
fn compute() -> u64 {
let radix = 10;
let ps = PrimeSet::new();
for (perm, _) in Permutations::new(&[7, 6, 5, 4, 3, 2, 1], 7) {
let n = Integer::from_digits(perm.iter().rev().map(|&x| x), radix);
if ps.contains(n) {
return n
}
}
unreachable!()
}
fn
|
() -> String {
compute().to_string()
}
problem!("7652413", solve);
|
solve
|
identifier_name
|