file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
lint-dead-code-5.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_variables)]
#![deny(dead_code)]
enum Enum1 {
Variant1(isize),
Variant2 //~ ERROR: variant is never used
}
enum Enum2 {
Variant3(bool),
#[allow(dead_code)]
Variant4(isize),
Variant5 { _x: isize }, //~ ERROR: variant is never used: `Variant5`
Variant6(isize), //~ ERROR: variant is never used: `Variant6`
_Variant7,
}
enum Enum3 { //~ ERROR: enum is never used
Variant8,
Variant9
}
fn
|
() {
let v = Enum1::Variant1(1);
match v {
Enum1::Variant1(_) => (),
Enum1::Variant2 => ()
}
let x = Enum2::Variant3(true);
}
|
main
|
identifier_name
|
timeout.rs
|
extern crate desync;
use std::thread;
use std::time::*;
use std::sync::mpsc::*;
use desync::scheduler::*;
pub fn timeout<TFn:'static+Send+FnOnce() -> ()>(action: TFn, millis: u64) {
enum ThreadState {
Ok,
Timeout,
Panic
};
let (tx, rx) = channel();
let (tx1, tx2) = (tx.clone(), tx.clone());
thread::Builder::new()
.name("test timeout thread".to_string())
.spawn(move || {
struct DetectPanic(Sender<ThreadState>);
impl Drop for DetectPanic {
fn drop(&mut self) {
if thread::panicking() {
self.0.send(ThreadState::Panic).ok();
}
}
}
let _detectpanic = DetectPanic(tx1.clone());
action();
tx1.send(ThreadState::Ok).ok();
})
.expect("Create timeout run thread");
let (timer_done, timer_done_recv) = channel();
let timer = thread::Builder::new()
.name("timeout thread".to_string())
.spawn(move || {
let done = timer_done_recv.recv_timeout(Duration::from_millis(millis));
if done.is_err() {
tx2.send(ThreadState::Timeout).ok();
}
}).expect("Create timeout timer thread");
match rx.recv().expect("Receive timeout status") {
ThreadState::Ok => {
// Stop the timer thread
timer_done.send(()).expect("Stop timer");
timer.join().expect("Wait for timer to stop");
},
ThreadState::Timeout =>
|
,
ThreadState::Panic => {
println!("{:?}", scheduler());
panic!("Timed thread panicked");
}
}
}
|
{
println!("{:?}", scheduler());
panic!("Timeout");
}
|
conditional_block
|
timeout.rs
|
extern crate desync;
use std::thread;
use std::time::*;
use std::sync::mpsc::*;
use desync::scheduler::*;
pub fn timeout<TFn:'static+Send+FnOnce() -> ()>(action: TFn, millis: u64) {
enum ThreadState {
Ok,
Timeout,
Panic
};
let (tx, rx) = channel();
let (tx1, tx2) = (tx.clone(), tx.clone());
thread::Builder::new()
.name("test timeout thread".to_string())
.spawn(move || {
struct DetectPanic(Sender<ThreadState>);
impl Drop for DetectPanic {
fn drop(&mut self) {
if thread::panicking() {
self.0.send(ThreadState::Panic).ok();
}
}
}
let _detectpanic = DetectPanic(tx1.clone());
action();
tx1.send(ThreadState::Ok).ok();
})
.expect("Create timeout run thread");
let (timer_done, timer_done_recv) = channel();
let timer = thread::Builder::new()
.name("timeout thread".to_string())
.spawn(move || {
let done = timer_done_recv.recv_timeout(Duration::from_millis(millis));
if done.is_err() {
tx2.send(ThreadState::Timeout).ok();
}
}).expect("Create timeout timer thread");
|
ThreadState::Ok => {
// Stop the timer thread
timer_done.send(()).expect("Stop timer");
timer.join().expect("Wait for timer to stop");
},
ThreadState::Timeout => {
println!("{:?}", scheduler());
panic!("Timeout");
},
ThreadState::Panic => {
println!("{:?}", scheduler());
panic!("Timed thread panicked");
}
}
}
|
match rx.recv().expect("Receive timeout status") {
|
random_line_split
|
timeout.rs
|
extern crate desync;
use std::thread;
use std::time::*;
use std::sync::mpsc::*;
use desync::scheduler::*;
pub fn timeout<TFn:'static+Send+FnOnce() -> ()>(action: TFn, millis: u64) {
enum
|
{
Ok,
Timeout,
Panic
};
let (tx, rx) = channel();
let (tx1, tx2) = (tx.clone(), tx.clone());
thread::Builder::new()
.name("test timeout thread".to_string())
.spawn(move || {
struct DetectPanic(Sender<ThreadState>);
impl Drop for DetectPanic {
fn drop(&mut self) {
if thread::panicking() {
self.0.send(ThreadState::Panic).ok();
}
}
}
let _detectpanic = DetectPanic(tx1.clone());
action();
tx1.send(ThreadState::Ok).ok();
})
.expect("Create timeout run thread");
let (timer_done, timer_done_recv) = channel();
let timer = thread::Builder::new()
.name("timeout thread".to_string())
.spawn(move || {
let done = timer_done_recv.recv_timeout(Duration::from_millis(millis));
if done.is_err() {
tx2.send(ThreadState::Timeout).ok();
}
}).expect("Create timeout timer thread");
match rx.recv().expect("Receive timeout status") {
ThreadState::Ok => {
// Stop the timer thread
timer_done.send(()).expect("Stop timer");
timer.join().expect("Wait for timer to stop");
},
ThreadState::Timeout => {
println!("{:?}", scheduler());
panic!("Timeout");
},
ThreadState::Panic => {
println!("{:?}", scheduler());
panic!("Timed thread panicked");
}
}
}
|
ThreadState
|
identifier_name
|
main.rs
|
/* diosix hypervisor main entry code
*
* (c) Chris Williams, 2019-2021.
*
* See LICENSE for usage and copying.
*/
/* let the compiler know we're on our own here in bare-metal world */
#![no_std]
#![no_main]
#![feature(asm)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(improper_ctypes)]
#![feature(type_ascription)]
/* provide a framework for unit testing */
#![feature(custom_test_frameworks)]
#![test_runner(crate::run_tests)]
#![reexport_test_harness_main = "hvtests"] /* entry point for tests */
/* plug our custom heap allocator into the Rust language: Box, etc */
#![feature(alloc_error_handler)]
#![feature(box_syntax)]
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
/* needed to convert raw dtb pointer into a slice */
use core::slice;
/* needed for fast lookup tables of stuff */
extern crate hashbrown;
/* needed for elf parsing */
extern crate xmas_elf;
/* needed for device tree parsing and manipulation */
extern crate devicetree;
/* needed for parsing diosix manifest file-system (DMFS) images bundled with the hypervisor */
extern crate dmfs;
/* needed for lazily-allocated static variables */
#[macro_use]
extern crate lazy_static;
/* this will bring in all the hardware-specific code */
extern crate platform;
/* and now for all our non-hw specific code */
#[macro_use]
mod debug; /* get us some kind of debug output, typically to a serial port */
#[macro_use]
mod capsule; /* manage capsules */
#[macro_use]
mod heap; /* per-CPU private heap management */
#[macro_use]
mod physmem; /* manage host physical memory */
mod hardware; /* parse device trees into hardware objects */
mod panic; /* implement panic() handlers */
mod irq; /* handle hw interrupts and sw exceptions, collectively known as IRQs */
mod virtmem; /* manage capsule virtual memory */
mod pcore; /* manage CPU cores */
mod vcore; /* virtual CPU core management... */
mod scheduler; /*...and scheduling */
mod loader; /* parse and load supervisor binaries */
mod message; /* send messages between physical cores */
mod service; /* allow capsules to register services */
mod manifest; /* manage capsules loaded with the hypervisor */
/* needed for exclusive locks */
mod lock;
use lock::Mutex;
/* list of error codes */
mod error;
use error::Cause;
use pcore::{PhysicalCoreID, BOOT_PCORE_ID};
/* tell Rust to use our HVallocator to allocate and free heap memory.
although we'll keep track of physical memory, we'll let Rust perform essential
tasks, such as dropping objects when they're no longer needed, borrow checking, etc */
#[global_allocator]
static HV_HEAP: heap::HVallocator = heap::HVallocator;
lazy_static!
{
/* set to true to allow physical CPU cores to start running supervisor code */
static ref INIT_DONE: Mutex<bool> = Mutex::new("system bring-up", false);
/* a physical CPU core obtaining this lock when it is false must walk the DMFS, create
capsules required to run at boot time, and set the flag to true. any other core
obtaining it as true must release the lock and move on */
static ref MANIFEST_UNPACKED: Mutex<bool> = Mutex::new("dmfs unpacked", false);
/* set to true if individual cores can sound off their presence and capabilities */
static ref ROLL_CALL: Mutex<bool> = Mutex::new("CPU roll call", false);
}
/* pointer sizes: stick to usize as much as possible: don't always assume it's a 64-bit machine */
/* hventry
This is the official entry point of the Rust-level hypervisor.
Call hvmain, which is where all the real work happens, and catch any errors.
=> cpu_nr = this boot-assigned CPU ID number
dtb_ptr = pointer to start of device tree blob structure
dtb_len = 32-bit big-endian length of the device tree blob
<= return to infinite loop, awaiting interrupts */
#[no_mangle]
pub extern "C" fn hventry(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32)
{
/* carry out tests if that's what we're here for */
#[cfg(test)]
hvtests();
/* if not performing tests, start the system as normal */
match hvmain(cpu_nr, dtb_ptr, dtb_len)
{
Err(e) =>
{
hvalert!("Hypervisor failed to start. Reason: {:?}", e);
debughousekeeper!(); /* attempt to flush queued debug to output */
},
_ => () /* continue waiting for an IRQ to come in */
}
}
/* hvmain
This code runs at the hypervisor level, with full physical memory access.
Its job is to initialize physical CPU cores and other resources so that capsules can be
created in which supervisors run that manage their own user spaces, in which
applications run. The hypervisor ensures capsules are kept apart using
hardware protections.
Assumes all physical CPU cores enter this function during startup.
The boot CPU is chosen to initialize the system in pre-SMP mode.
If we're on a single CPU core then everything should still run OK.
Assumes hardware and exception interrupts are enabled and handlers
installed.
Also assumes all CPU cores are compatible ISA-wise. There is provision
for marking some cores as more powerful than others for systems with
a mix of performance and efficiency CPU cores.
=> cpu_nr = arbitrary CPU core ID number assigned by boot code,
separate from hardware ID number.
BOOT_PCORE_ID = boot CPU core.
dtb_ptr = pointer to device tree in memory from bootloader
dtb_len = 32-bit big endian size of the device tree
<= return to infinite loop, waiting for interrupts
*/
fn hvmain(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32) -> Result<(), Cause>
{
/* set up each physical processor core with its own private heap pool and any other resources.
each private pool uses physical memory assigned by the pre-hvmain boot code. init() should be called
first thing to set up each processor core, including the boot CPU, which then sets up the global
resources. all non-boot CPUs should wait until global resources are ready. */
pcore::PhysicalCore::init(cpu_nr);
/* note that pre-physmem::init(), CPU cores rely on their pre-hventry()-assigned
heap space. after physmem::init(), CPU cores can extend their heaps using physical memory.
the hypervisor will become stuck pre-physmem::init() if it goes beyond its assigned heap space. */
match cpu_nr
{
/* delegate to boot CPU the welcome banner and set up global resources.
note: the platform code should ensure whichever CPU core is assigned
BOOT_PCORE_ID as its cpu_nr can initialize the hypervisor */
BOOT_PCORE_ID =>
{
/* convert the dtb pointer into a rust byte slice. assumes dtb_len is valid */
let dtb = unsafe { slice::from_raw_parts(dtb_ptr, u32::from_be(dtb_len) as usize) };
/* process device tree to create data structures representing system hardware,
allowing these peripherals to be accessed by subsequent routines. this should
also initialize any found hardware */
hardware::parse_and_init(dtb)?;
/* register all the available physical RAM */
physmem::init()?;
describe_system();
/* allow other cores to continue */
*(INIT_DONE.lock()) = true;
},
/* non-boot cores must wait here for early initialization to complete */
_ => while *(INIT_DONE.lock())!= true {}
}
/* Create capsules to run from the bundled DMFS image.
the hypervisor can't make any assumptions about the underlying hardware.
the device tree for these early capsules is derived from the host's device tree,
modified to virtualize peripherals. the virtual CPU cores will be based on the
physical CPU core that creates them. this is more straightforward than the hypervisor
trying to specify a hypothetical CPU core.
as such, only allow supervisor-mode capable CPU cores to build capsules */
if pcore::PhysicalCore::smode_supported() == true
{
/* only allow one core to do the unpacking */
let mut flag = MANIFEST_UNPACKED.lock();
if *flag == false
{
/* process the manifest and mark it as handled */
manifest::unpack_at_boot()?;
*flag = true;
/* allow all working cores to join the roll call */
*(ROLL_CALL.lock()) = true;
}
}
/* once ROLL_CALL is set to true, acknowledge we're alive and well, and report CPU core features */
while *(ROLL_CALL.lock())!= true {}
hvdebug!("Physical CPU core {:?} ready to roll", pcore::PhysicalCore::describe());
/* enable timer on this physical CPU core to start scheduling and running virtual cores */
scheduler::start()?;
/* initialization complete. fall through to infinite loop waiting for a timer interrupt
to come in. when it does fire, this stack will be flattened, a virtual CPU loaded up to run,
and this boot thread will disappear. thus, the call to start() should be the last thing
this boot thread does */
Ok(())
}
/* dump system information to the user */
fn describe_system()
{
const KILOBYTE: usize = 1024;
const MEGABYTE: usize = KILOBYTE * KILOBYTE;
const GIGABYTE: usize = KILOBYTE * MEGABYTE;
/* say hello via the debug port with some information */
hvdebug!("Diosix {} :: Debug enabled. {} and {} RAM found",
/* build version number */
env!("CARGO_PKG_VERSION"),
/* report number of CPU cores found */
match hardware::get_nr_cpu_cores()
{
None | Some(0) => format!("no CPU cores"),
Some(1) => format!("1 CPU core"),
Some(c) => format!("{} CPU cores", c)
},
/* count up total system RAM using GiB / MiB / KiB */
match hardware::get_phys_ram_total()
{
Some(t) => if t >= GIGABYTE
{
format!("{} GiB", t / GIGABYTE)
}
else if t >= MEGABYTE
{
format!("{} MiB", t / MEGABYTE)
}
else
{
format!("{} KiB", t / KILOBYTE)
},
None => format!("no")
});
}
/* mandatory error handler for memory allocations */
#[alloc_error_handler]
fn hvalloc_error(attempt: core::alloc::Layout) ->!
{
let heap = &(*<pcore::PhysicalCore>::this()).heap;
hvalert!("hvalloc_error: Failed to allocate/free {} bytes. Heap: {:?}", attempt.size(), heap);
debughousekeeper!();
loop {} /* it would be nice to be able to not die here :( */
}
/* perform all unit tests required */
#[cfg(test)]
fn run_tests(unit_tests: &[&dyn Fn()])
{
/* run each test one by one */
for test in unit_tests
{
test();
}
/* exit cleanly once tests are complete */
platform::test::end(Ok(0));
}
|
#[test_case]
fn test_assertion()
{
assert_eq!(42, 42);
}
|
random_line_split
|
|
main.rs
|
/* diosix hypervisor main entry code
*
* (c) Chris Williams, 2019-2021.
*
* See LICENSE for usage and copying.
*/
/* let the compiler know we're on our own here in bare-metal world */
#![no_std]
#![no_main]
#![feature(asm)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(improper_ctypes)]
#![feature(type_ascription)]
/* provide a framework for unit testing */
#![feature(custom_test_frameworks)]
#![test_runner(crate::run_tests)]
#![reexport_test_harness_main = "hvtests"] /* entry point for tests */
/* plug our custom heap allocator into the Rust language: Box, etc */
#![feature(alloc_error_handler)]
#![feature(box_syntax)]
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
/* needed to convert raw dtb pointer into a slice */
use core::slice;
/* needed for fast lookup tables of stuff */
extern crate hashbrown;
/* needed for elf parsing */
extern crate xmas_elf;
/* needed for device tree parsing and manipulation */
extern crate devicetree;
/* needed for parsing diosix manifest file-system (DMFS) images bundled with the hypervisor */
extern crate dmfs;
/* needed for lazily-allocated static variables */
#[macro_use]
extern crate lazy_static;
/* this will bring in all the hardware-specific code */
extern crate platform;
/* and now for all our non-hw specific code */
#[macro_use]
mod debug; /* get us some kind of debug output, typically to a serial port */
#[macro_use]
mod capsule; /* manage capsules */
#[macro_use]
mod heap; /* per-CPU private heap management */
#[macro_use]
mod physmem; /* manage host physical memory */
mod hardware; /* parse device trees into hardware objects */
mod panic; /* implement panic() handlers */
mod irq; /* handle hw interrupts and sw exceptions, collectively known as IRQs */
mod virtmem; /* manage capsule virtual memory */
mod pcore; /* manage CPU cores */
mod vcore; /* virtual CPU core management... */
mod scheduler; /*...and scheduling */
mod loader; /* parse and load supervisor binaries */
mod message; /* send messages between physical cores */
mod service; /* allow capsules to register services */
mod manifest; /* manage capsules loaded with the hypervisor */
/* needed for exclusive locks */
mod lock;
use lock::Mutex;
/* list of error codes */
mod error;
use error::Cause;
use pcore::{PhysicalCoreID, BOOT_PCORE_ID};
/* tell Rust to use our HVallocator to allocate and free heap memory.
although we'll keep track of physical memory, we'll let Rust perform essential
tasks, such as dropping objects when they're no longer needed, borrow checking, etc */
#[global_allocator]
static HV_HEAP: heap::HVallocator = heap::HVallocator;
lazy_static!
{
/* set to true to allow physical CPU cores to start running supervisor code */
static ref INIT_DONE: Mutex<bool> = Mutex::new("system bring-up", false);
/* a physical CPU core obtaining this lock when it is false must walk the DMFS, create
capsules required to run at boot time, and set the flag to true. any other core
obtaining it as true must release the lock and move on */
static ref MANIFEST_UNPACKED: Mutex<bool> = Mutex::new("dmfs unpacked", false);
/* set to true if individual cores can sound off their presence and capabilities */
static ref ROLL_CALL: Mutex<bool> = Mutex::new("CPU roll call", false);
}
/* pointer sizes: stick to usize as much as possible: don't always assume it's a 64-bit machine */
/* hventry
This is the official entry point of the Rust-level hypervisor.
Call hvmain, which is where all the real work happens, and catch any errors.
=> cpu_nr = this boot-assigned CPU ID number
dtb_ptr = pointer to start of device tree blob structure
dtb_len = 32-bit big-endian length of the device tree blob
<= return to infinite loop, awaiting interrupts */
#[no_mangle]
pub extern "C" fn hventry(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32)
{
/* carry out tests if that's what we're here for */
#[cfg(test)]
hvtests();
/* if not performing tests, start the system as normal */
match hvmain(cpu_nr, dtb_ptr, dtb_len)
{
Err(e) =>
{
hvalert!("Hypervisor failed to start. Reason: {:?}", e);
debughousekeeper!(); /* attempt to flush queued debug to output */
},
_ => () /* continue waiting for an IRQ to come in */
}
}
/* hvmain
This code runs at the hypervisor level, with full physical memory access.
Its job is to initialize physical CPU cores and other resources so that capsules can be
created in which supervisors run that manage their own user spaces, in which
applications run. The hypervisor ensures capsules are kept apart using
hardware protections.
Assumes all physical CPU cores enter this function during startup.
The boot CPU is chosen to initialize the system in pre-SMP mode.
If we're on a single CPU core then everything should still run OK.
Assumes hardware and exception interrupts are enabled and handlers
installed.
Also assumes all CPU cores are compatible ISA-wise. There is provision
for marking some cores as more powerful than others for systems with
a mix of performance and efficiency CPU cores.
=> cpu_nr = arbitrary CPU core ID number assigned by boot code,
separate from hardware ID number.
BOOT_PCORE_ID = boot CPU core.
dtb_ptr = pointer to device tree in memory from bootloader
dtb_len = 32-bit big endian size of the device tree
<= return to infinite loop, waiting for interrupts
*/
fn hvmain(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32) -> Result<(), Cause>
{
/* set up each physical processor core with its own private heap pool and any other resources.
each private pool uses physical memory assigned by the pre-hvmain boot code. init() should be called
first thing to set up each processor core, including the boot CPU, which then sets up the global
resources. all non-boot CPUs should wait until global resources are ready. */
pcore::PhysicalCore::init(cpu_nr);
/* note that pre-physmem::init(), CPU cores rely on their pre-hventry()-assigned
heap space. after physmem::init(), CPU cores can extend their heaps using physical memory.
the hypervisor will become stuck pre-physmem::init() if it goes beyond its assigned heap space. */
match cpu_nr
{
/* delegate to boot CPU the welcome banner and set up global resources.
note: the platform code should ensure whichever CPU core is assigned
BOOT_PCORE_ID as its cpu_nr can initialize the hypervisor */
BOOT_PCORE_ID =>
{
/* convert the dtb pointer into a rust byte slice. assumes dtb_len is valid */
let dtb = unsafe { slice::from_raw_parts(dtb_ptr, u32::from_be(dtb_len) as usize) };
/* process device tree to create data structures representing system hardware,
allowing these peripherals to be accessed by subsequent routines. this should
also initialize any found hardware */
hardware::parse_and_init(dtb)?;
/* register all the available physical RAM */
physmem::init()?;
describe_system();
/* allow other cores to continue */
*(INIT_DONE.lock()) = true;
},
/* non-boot cores must wait here for early initialization to complete */
_ => while *(INIT_DONE.lock())!= true {}
}
/* Create capsules to run from the bundled DMFS image.
the hypervisor can't make any assumptions about the underlying hardware.
the device tree for these early capsules is derived from the host's device tree,
modified to virtualize peripherals. the virtual CPU cores will be based on the
physical CPU core that creates them. this is more straightforward than the hypervisor
trying to specify a hypothetical CPU core.
as such, only allow supervisor-mode capable CPU cores to build capsules */
if pcore::PhysicalCore::smode_supported() == true
{
/* only allow one core to do the unpacking */
let mut flag = MANIFEST_UNPACKED.lock();
if *flag == false
{
/* process the manifest and mark it as handled */
manifest::unpack_at_boot()?;
*flag = true;
/* allow all working cores to join the roll call */
*(ROLL_CALL.lock()) = true;
}
}
/* once ROLL_CALL is set to true, acknowledge we're alive and well, and report CPU core features */
while *(ROLL_CALL.lock())!= true {}
hvdebug!("Physical CPU core {:?} ready to roll", pcore::PhysicalCore::describe());
/* enable timer on this physical CPU core to start scheduling and running virtual cores */
scheduler::start()?;
/* initialization complete. fall through to infinite loop waiting for a timer interrupt
to come in. when it does fire, this stack will be flattened, a virtual CPU loaded up to run,
and this boot thread will disappear. thus, the call to start() should be the last thing
this boot thread does */
Ok(())
}
/* dump system information to the user */
fn describe_system()
{
const KILOBYTE: usize = 1024;
const MEGABYTE: usize = KILOBYTE * KILOBYTE;
const GIGABYTE: usize = KILOBYTE * MEGABYTE;
/* say hello via the debug port with some information */
hvdebug!("Diosix {} :: Debug enabled. {} and {} RAM found",
/* build version number */
env!("CARGO_PKG_VERSION"),
/* report number of CPU cores found */
match hardware::get_nr_cpu_cores()
{
None | Some(0) => format!("no CPU cores"),
Some(1) => format!("1 CPU core"),
Some(c) => format!("{} CPU cores", c)
},
/* count up total system RAM using GiB / MiB / KiB */
match hardware::get_phys_ram_total()
{
Some(t) => if t >= GIGABYTE
{
format!("{} GiB", t / GIGABYTE)
}
else if t >= MEGABYTE
{
format!("{} MiB", t / MEGABYTE)
}
else
{
format!("{} KiB", t / KILOBYTE)
},
None => format!("no")
});
}
/* mandatory error handler for memory allocations */
#[alloc_error_handler]
fn hvalloc_error(attempt: core::alloc::Layout) ->!
{
let heap = &(*<pcore::PhysicalCore>::this()).heap;
hvalert!("hvalloc_error: Failed to allocate/free {} bytes. Heap: {:?}", attempt.size(), heap);
debughousekeeper!();
loop {} /* it would be nice to be able to not die here :( */
}
/* perform all unit tests required */
#[cfg(test)]
fn run_tests(unit_tests: &[&dyn Fn()])
{
/* run each test one by one */
for test in unit_tests
{
test();
}
/* exit cleanly once tests are complete */
platform::test::end(Ok(0));
}
#[test_case]
fn test_assertion()
|
{
assert_eq!(42, 42);
}
|
identifier_body
|
|
main.rs
|
/* diosix hypervisor main entry code
*
* (c) Chris Williams, 2019-2021.
*
* See LICENSE for usage and copying.
*/
/* let the compiler know we're on our own here in bare-metal world */
#![no_std]
#![no_main]
#![feature(asm)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(improper_ctypes)]
#![feature(type_ascription)]
/* provide a framework for unit testing */
#![feature(custom_test_frameworks)]
#![test_runner(crate::run_tests)]
#![reexport_test_harness_main = "hvtests"] /* entry point for tests */
/* plug our custom heap allocator into the Rust language: Box, etc */
#![feature(alloc_error_handler)]
#![feature(box_syntax)]
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
/* needed to convert raw dtb pointer into a slice */
use core::slice;
/* needed for fast lookup tables of stuff */
extern crate hashbrown;
/* needed for elf parsing */
extern crate xmas_elf;
/* needed for device tree parsing and manipulation */
extern crate devicetree;
/* needed for parsing diosix manifest file-system (DMFS) images bundled with the hypervisor */
extern crate dmfs;
/* needed for lazily-allocated static variables */
#[macro_use]
extern crate lazy_static;
/* this will bring in all the hardware-specific code */
extern crate platform;
/* and now for all our non-hw specific code */
#[macro_use]
mod debug; /* get us some kind of debug output, typically to a serial port */
#[macro_use]
mod capsule; /* manage capsules */
#[macro_use]
mod heap; /* per-CPU private heap management */
#[macro_use]
mod physmem; /* manage host physical memory */
mod hardware; /* parse device trees into hardware objects */
mod panic; /* implement panic() handlers */
mod irq; /* handle hw interrupts and sw exceptions, collectively known as IRQs */
mod virtmem; /* manage capsule virtual memory */
mod pcore; /* manage CPU cores */
mod vcore; /* virtual CPU core management... */
mod scheduler; /*...and scheduling */
mod loader; /* parse and load supervisor binaries */
mod message; /* send messages between physical cores */
mod service; /* allow capsules to register services */
mod manifest; /* manage capsules loaded with the hypervisor */
/* needed for exclusive locks */
mod lock;
use lock::Mutex;
/* list of error codes */
mod error;
use error::Cause;
use pcore::{PhysicalCoreID, BOOT_PCORE_ID};
/* tell Rust to use our HVallocator to allocate and free heap memory.
although we'll keep track of physical memory, we'll let Rust perform essential
tasks, such as dropping objects when they're no longer needed, borrow checking, etc */
#[global_allocator]
static HV_HEAP: heap::HVallocator = heap::HVallocator;
lazy_static!
{
/* set to true to allow physical CPU cores to start running supervisor code */
static ref INIT_DONE: Mutex<bool> = Mutex::new("system bring-up", false);
/* a physical CPU core obtaining this lock when it is false must walk the DMFS, create
capsules required to run at boot time, and set the flag to true. any other core
obtaining it as true must release the lock and move on */
static ref MANIFEST_UNPACKED: Mutex<bool> = Mutex::new("dmfs unpacked", false);
/* set to true if individual cores can sound off their presence and capabilities */
static ref ROLL_CALL: Mutex<bool> = Mutex::new("CPU roll call", false);
}
/* pointer sizes: stick to usize as much as possible: don't always assume it's a 64-bit machine */
/* hventry
This is the official entry point of the Rust-level hypervisor.
Call hvmain, which is where all the real work happens, and catch any errors.
=> cpu_nr = this boot-assigned CPU ID number
dtb_ptr = pointer to start of device tree blob structure
dtb_len = 32-bit big-endian length of the device tree blob
<= return to infinite loop, awaiting interrupts */
#[no_mangle]
pub extern "C" fn hventry(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32)
{
/* carry out tests if that's what we're here for */
#[cfg(test)]
hvtests();
/* if not performing tests, start the system as normal */
match hvmain(cpu_nr, dtb_ptr, dtb_len)
{
Err(e) =>
{
hvalert!("Hypervisor failed to start. Reason: {:?}", e);
debughousekeeper!(); /* attempt to flush queued debug to output */
},
_ => () /* continue waiting for an IRQ to come in */
}
}
/* hvmain
This code runs at the hypervisor level, with full physical memory access.
Its job is to initialize physical CPU cores and other resources so that capsules can be
created in which supervisors run that manage their own user spaces, in which
applications run. The hypervisor ensures capsules are kept apart using
hardware protections.
Assumes all physical CPU cores enter this function during startup.
The boot CPU is chosen to initialize the system in pre-SMP mode.
If we're on a single CPU core then everything should still run OK.
Assumes hardware and exception interrupts are enabled and handlers
installed.
Also assumes all CPU cores are compatible ISA-wise. There is provision
for marking some cores as more powerful than others for systems with
a mix of performance and efficiency CPU cores.
=> cpu_nr = arbitrary CPU core ID number assigned by boot code,
separate from hardware ID number.
BOOT_PCORE_ID = boot CPU core.
dtb_ptr = pointer to device tree in memory from bootloader
dtb_len = 32-bit big endian size of the device tree
<= return to infinite loop, waiting for interrupts
*/
fn hvmain(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32) -> Result<(), Cause>
{
/* set up each physical processor core with its own private heap pool and any other resources.
each private pool uses physical memory assigned by the pre-hvmain boot code. init() should be called
first thing to set up each processor core, including the boot CPU, which then sets up the global
resources. all non-boot CPUs should wait until global resources are ready. */
pcore::PhysicalCore::init(cpu_nr);
/* note that pre-physmem::init(), CPU cores rely on their pre-hventry()-assigned
heap space. after physmem::init(), CPU cores can extend their heaps using physical memory.
the hypervisor will become stuck pre-physmem::init() if it goes beyond its assigned heap space. */
match cpu_nr
{
/* delegate to boot CPU the welcome banner and set up global resources.
note: the platform code should ensure whichever CPU core is assigned
BOOT_PCORE_ID as its cpu_nr can initialize the hypervisor */
BOOT_PCORE_ID =>
{
/* convert the dtb pointer into a rust byte slice. assumes dtb_len is valid */
let dtb = unsafe { slice::from_raw_parts(dtb_ptr, u32::from_be(dtb_len) as usize) };
/* process device tree to create data structures representing system hardware,
allowing these peripherals to be accessed by subsequent routines. this should
also initialize any found hardware */
hardware::parse_and_init(dtb)?;
/* register all the available physical RAM */
physmem::init()?;
describe_system();
/* allow other cores to continue */
*(INIT_DONE.lock()) = true;
},
/* non-boot cores must wait here for early initialization to complete */
_ => while *(INIT_DONE.lock())!= true {}
}
/* Create capsules to run from the bundled DMFS image.
the hypervisor can't make any assumptions about the underlying hardware.
the device tree for these early capsules is derived from the host's device tree,
modified to virtualize peripherals. the virtual CPU cores will be based on the
physical CPU core that creates them. this is more straightforward than the hypervisor
trying to specify a hypothetical CPU core.
as such, only allow supervisor-mode capable CPU cores to build capsules */
if pcore::PhysicalCore::smode_supported() == true
{
/* only allow one core to do the unpacking */
let mut flag = MANIFEST_UNPACKED.lock();
if *flag == false
{
/* process the manifest and mark it as handled */
manifest::unpack_at_boot()?;
*flag = true;
/* allow all working cores to join the roll call */
*(ROLL_CALL.lock()) = true;
}
}
/* once ROLL_CALL is set to true, acknowledge we're alive and well, and report CPU core features */
while *(ROLL_CALL.lock())!= true {}
hvdebug!("Physical CPU core {:?} ready to roll", pcore::PhysicalCore::describe());
/* enable timer on this physical CPU core to start scheduling and running virtual cores */
scheduler::start()?;
/* initialization complete. fall through to infinite loop waiting for a timer interrupt
to come in. when it does fire, this stack will be flattened, a virtual CPU loaded up to run,
and this boot thread will disappear. thus, the call to start() should be the last thing
this boot thread does */
Ok(())
}
/* dump system information to the user */
fn describe_system()
{
const KILOBYTE: usize = 1024;
const MEGABYTE: usize = KILOBYTE * KILOBYTE;
const GIGABYTE: usize = KILOBYTE * MEGABYTE;
/* say hello via the debug port with some information */
hvdebug!("Diosix {} :: Debug enabled. {} and {} RAM found",
/* build version number */
env!("CARGO_PKG_VERSION"),
/* report number of CPU cores found */
match hardware::get_nr_cpu_cores()
{
None | Some(0) => format!("no CPU cores"),
Some(1) => format!("1 CPU core"),
Some(c) => format!("{} CPU cores", c)
},
/* count up total system RAM using GiB / MiB / KiB */
match hardware::get_phys_ram_total()
{
Some(t) => if t >= GIGABYTE
{
format!("{} GiB", t / GIGABYTE)
}
else if t >= MEGABYTE
{
format!("{} MiB", t / MEGABYTE)
}
else
{
format!("{} KiB", t / KILOBYTE)
},
None => format!("no")
});
}
/* mandatory error handler for memory allocations */
#[alloc_error_handler]
fn hvalloc_error(attempt: core::alloc::Layout) ->!
{
let heap = &(*<pcore::PhysicalCore>::this()).heap;
hvalert!("hvalloc_error: Failed to allocate/free {} bytes. Heap: {:?}", attempt.size(), heap);
debughousekeeper!();
loop {} /* it would be nice to be able to not die here :( */
}
/* perform all unit tests required */
#[cfg(test)]
fn
|
(unit_tests: &[&dyn Fn()])
{
/* run each test one by one */
for test in unit_tests
{
test();
}
/* exit cleanly once tests are complete */
platform::test::end(Ok(0));
}
#[test_case]
fn test_assertion()
{
assert_eq!(42, 42);
}
|
run_tests
|
identifier_name
|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
fn
|
<T>(x: T) -> T { return x; }
#[derive(Copy)]
struct Triple {x: int, y: int, z: int}
pub fn main() {
let mut x = 62;
let mut y = 63;
let a = 'a';
let mut b = 'b';
let p: Triple = Triple {x: 65, y: 66, z: 67};
let mut q: Triple = Triple {x: 68, y: 69, z: 70};
y = id::<int>(x);
println!("{}", y);
assert_eq!(x, y);
b = id::<char>(a);
println!("{}", b);
assert_eq!(a, b);
q = id::<Triple>(p);
x = p.z;
y = q.z;
println!("{}", y);
assert_eq!(x, y);
}
|
id
|
identifier_name
|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
fn id<T>(x: T) -> T
|
#[derive(Copy)]
struct Triple {x: int, y: int, z: int}
pub fn main() {
let mut x = 62;
let mut y = 63;
let a = 'a';
let mut b = 'b';
let p: Triple = Triple {x: 65, y: 66, z: 67};
let mut q: Triple = Triple {x: 68, y: 69, z: 70};
y = id::<int>(x);
println!("{}", y);
assert_eq!(x, y);
b = id::<char>(a);
println!("{}", b);
assert_eq!(a, b);
q = id::<Triple>(p);
x = p.z;
y = q.z;
println!("{}", y);
assert_eq!(x, y);
}
|
{ return x; }
|
identifier_body
|
generic-fn.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_assignment)]
fn id<T>(x: T) -> T { return x; }
#[derive(Copy)]
struct Triple {x: int, y: int, z: int}
|
let mut x = 62;
let mut y = 63;
let a = 'a';
let mut b = 'b';
let p: Triple = Triple {x: 65, y: 66, z: 67};
let mut q: Triple = Triple {x: 68, y: 69, z: 70};
y = id::<int>(x);
println!("{}", y);
assert_eq!(x, y);
b = id::<char>(a);
println!("{}", b);
assert_eq!(a, b);
q = id::<Triple>(p);
x = p.z;
y = q.z;
println!("{}", y);
assert_eq!(x, y);
}
|
pub fn main() {
|
random_line_split
|
blob_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use filemanager_thread::{FileManager, UIProvider};
use hyper::header::{DispositionType, ContentDisposition, DispositionParam};
use hyper::header::{Headers, ContentType, ContentLength, Charset};
use hyper::http::RawStatus;
use hyper_serde::Serde;
use ipc_channel::ipc;
use mime::{Mime, Attr};
use mime_classifier::MimeClassifier;
use net_traits::ProgressMsg::{Payload, Done};
use net_traits::blob_url_store::parse_blob_url;
use net_traits::filemanager_thread::{FileManagerThreadMsg, SelectedFileId, ReadFileProgress};
use net_traits::response::HttpsState;
use net_traits::{LoadConsumer, LoadData, Metadata, NetworkError};
use resource_thread::CancellationListener;
use resource_thread::{start_sending_sniffed_opt, send_error};
use std::boxed::FnBox;
use std::sync::Arc;
use util::thread::spawn_named;
// TODO: Check on GET
// https://w3c.github.io/FileAPI/#requestResponseModel
pub fn factory<UI:'static + UIProvider>(filemanager: Arc<FileManager<UI>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MimeClassifier>, CancellationListener) + Send> {
box move |load_data: LoadData, start_chan, classifier, cancel_listener| {
spawn_named(format!("blob loader for {}", load_data.url), move || {
load_blob(load_data, start_chan, classifier, filemanager, cancel_listener);
})
}
}
fn load_blob<UI:'static + UIProvider>
(load_data: LoadData, start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
filemanager: Arc<FileManager<UI>>,
cancel_listener: CancellationListener) {
let (chan, recv) = ipc::channel().unwrap();
if let Ok((id, origin, _fragment)) = parse_blob_url(&load_data.url.clone()) {
let id = SelectedFileId(id.simple().to_string());
let check_url_validity = true;
let msg = FileManagerThreadMsg::ReadFile(chan, id, check_url_validity, origin);
let _ = filemanager.handle(msg, Some(cancel_listener));
// Receive first chunk
match recv.recv().unwrap() {
Ok(ReadFileProgress::Meta(blob_buf)) => {
let content_type: Mime = blob_buf.type_string.parse().unwrap_or(mime!(Text / Plain));
let charset = content_type.get_param(Attr::Charset);
let mut headers = Headers::new();
if let Some(name) = blob_buf.filename {
let charset = charset.and_then(|c| c.as_str().parse().ok());
headers.set(ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::Filename(charset.unwrap_or(Charset::Us_Ascii),
None, name.as_bytes().to_vec())
|
]
});
}
headers.set(ContentType(content_type.clone()));
headers.set(ContentLength(blob_buf.size as u64));
let metadata = Metadata {
final_url: load_data.url.clone(),
content_type: Some(Serde(ContentType(content_type.clone()))),
charset: charset.map(|c| c.as_str().to_string()),
headers: Some(Serde(headers)),
// https://w3c.github.io/FileAPI/#TwoHundredOK
status: Some(Serde(RawStatus(200, "OK".into()))),
https_state: HttpsState::None,
referrer: None,
};
if let Ok(chan) =
start_sending_sniffed_opt(start_chan, metadata, classifier,
&blob_buf.bytes, load_data.context.clone()) {
let _ = chan.send(Payload(blob_buf.bytes));
loop {
match recv.recv().unwrap() {
Ok(ReadFileProgress::Partial(bytes)) => {
let _ = chan.send(Payload(bytes));
}
Ok(ReadFileProgress::EOF) => {
let _ = chan.send(Done(Ok(())));
return;
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
let _ = chan.send(Done(Err(err)));
return;
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
let _ = chan.send(Done(Err(err)));
return;
}
}
}
}
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
send_error(load_data.url, err, start_chan);
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
send_error(load_data.url, err, start_chan);
}
}
} else {
let e = format!("Invalid blob URL format {:?}", load_data.url);
let format_err = NetworkError::Internal(e);
send_error(load_data.url.clone(), format_err, start_chan);
}
}
|
random_line_split
|
|
blob_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use filemanager_thread::{FileManager, UIProvider};
use hyper::header::{DispositionType, ContentDisposition, DispositionParam};
use hyper::header::{Headers, ContentType, ContentLength, Charset};
use hyper::http::RawStatus;
use hyper_serde::Serde;
use ipc_channel::ipc;
use mime::{Mime, Attr};
use mime_classifier::MimeClassifier;
use net_traits::ProgressMsg::{Payload, Done};
use net_traits::blob_url_store::parse_blob_url;
use net_traits::filemanager_thread::{FileManagerThreadMsg, SelectedFileId, ReadFileProgress};
use net_traits::response::HttpsState;
use net_traits::{LoadConsumer, LoadData, Metadata, NetworkError};
use resource_thread::CancellationListener;
use resource_thread::{start_sending_sniffed_opt, send_error};
use std::boxed::FnBox;
use std::sync::Arc;
use util::thread::spawn_named;
// TODO: Check on GET
// https://w3c.github.io/FileAPI/#requestResponseModel
pub fn factory<UI:'static + UIProvider>(filemanager: Arc<FileManager<UI>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MimeClassifier>, CancellationListener) + Send> {
box move |load_data: LoadData, start_chan, classifier, cancel_listener| {
spawn_named(format!("blob loader for {}", load_data.url), move || {
load_blob(load_data, start_chan, classifier, filemanager, cancel_listener);
})
}
}
fn
|
<UI:'static + UIProvider>
(load_data: LoadData, start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
filemanager: Arc<FileManager<UI>>,
cancel_listener: CancellationListener) {
let (chan, recv) = ipc::channel().unwrap();
if let Ok((id, origin, _fragment)) = parse_blob_url(&load_data.url.clone()) {
let id = SelectedFileId(id.simple().to_string());
let check_url_validity = true;
let msg = FileManagerThreadMsg::ReadFile(chan, id, check_url_validity, origin);
let _ = filemanager.handle(msg, Some(cancel_listener));
// Receive first chunk
match recv.recv().unwrap() {
Ok(ReadFileProgress::Meta(blob_buf)) => {
let content_type: Mime = blob_buf.type_string.parse().unwrap_or(mime!(Text / Plain));
let charset = content_type.get_param(Attr::Charset);
let mut headers = Headers::new();
if let Some(name) = blob_buf.filename {
let charset = charset.and_then(|c| c.as_str().parse().ok());
headers.set(ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::Filename(charset.unwrap_or(Charset::Us_Ascii),
None, name.as_bytes().to_vec())
]
});
}
headers.set(ContentType(content_type.clone()));
headers.set(ContentLength(blob_buf.size as u64));
let metadata = Metadata {
final_url: load_data.url.clone(),
content_type: Some(Serde(ContentType(content_type.clone()))),
charset: charset.map(|c| c.as_str().to_string()),
headers: Some(Serde(headers)),
// https://w3c.github.io/FileAPI/#TwoHundredOK
status: Some(Serde(RawStatus(200, "OK".into()))),
https_state: HttpsState::None,
referrer: None,
};
if let Ok(chan) =
start_sending_sniffed_opt(start_chan, metadata, classifier,
&blob_buf.bytes, load_data.context.clone()) {
let _ = chan.send(Payload(blob_buf.bytes));
loop {
match recv.recv().unwrap() {
Ok(ReadFileProgress::Partial(bytes)) => {
let _ = chan.send(Payload(bytes));
}
Ok(ReadFileProgress::EOF) => {
let _ = chan.send(Done(Ok(())));
return;
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
let _ = chan.send(Done(Err(err)));
return;
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
let _ = chan.send(Done(Err(err)));
return;
}
}
}
}
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
send_error(load_data.url, err, start_chan);
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
send_error(load_data.url, err, start_chan);
}
}
} else {
let e = format!("Invalid blob URL format {:?}", load_data.url);
let format_err = NetworkError::Internal(e);
send_error(load_data.url.clone(), format_err, start_chan);
}
}
|
load_blob
|
identifier_name
|
blob_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use filemanager_thread::{FileManager, UIProvider};
use hyper::header::{DispositionType, ContentDisposition, DispositionParam};
use hyper::header::{Headers, ContentType, ContentLength, Charset};
use hyper::http::RawStatus;
use hyper_serde::Serde;
use ipc_channel::ipc;
use mime::{Mime, Attr};
use mime_classifier::MimeClassifier;
use net_traits::ProgressMsg::{Payload, Done};
use net_traits::blob_url_store::parse_blob_url;
use net_traits::filemanager_thread::{FileManagerThreadMsg, SelectedFileId, ReadFileProgress};
use net_traits::response::HttpsState;
use net_traits::{LoadConsumer, LoadData, Metadata, NetworkError};
use resource_thread::CancellationListener;
use resource_thread::{start_sending_sniffed_opt, send_error};
use std::boxed::FnBox;
use std::sync::Arc;
use util::thread::spawn_named;
// TODO: Check on GET
// https://w3c.github.io/FileAPI/#requestResponseModel
pub fn factory<UI:'static + UIProvider>(filemanager: Arc<FileManager<UI>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MimeClassifier>, CancellationListener) + Send>
|
fn load_blob<UI:'static + UIProvider>
(load_data: LoadData, start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
filemanager: Arc<FileManager<UI>>,
cancel_listener: CancellationListener) {
let (chan, recv) = ipc::channel().unwrap();
if let Ok((id, origin, _fragment)) = parse_blob_url(&load_data.url.clone()) {
let id = SelectedFileId(id.simple().to_string());
let check_url_validity = true;
let msg = FileManagerThreadMsg::ReadFile(chan, id, check_url_validity, origin);
let _ = filemanager.handle(msg, Some(cancel_listener));
// Receive first chunk
match recv.recv().unwrap() {
Ok(ReadFileProgress::Meta(blob_buf)) => {
let content_type: Mime = blob_buf.type_string.parse().unwrap_or(mime!(Text / Plain));
let charset = content_type.get_param(Attr::Charset);
let mut headers = Headers::new();
if let Some(name) = blob_buf.filename {
let charset = charset.and_then(|c| c.as_str().parse().ok());
headers.set(ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::Filename(charset.unwrap_or(Charset::Us_Ascii),
None, name.as_bytes().to_vec())
]
});
}
headers.set(ContentType(content_type.clone()));
headers.set(ContentLength(blob_buf.size as u64));
let metadata = Metadata {
final_url: load_data.url.clone(),
content_type: Some(Serde(ContentType(content_type.clone()))),
charset: charset.map(|c| c.as_str().to_string()),
headers: Some(Serde(headers)),
// https://w3c.github.io/FileAPI/#TwoHundredOK
status: Some(Serde(RawStatus(200, "OK".into()))),
https_state: HttpsState::None,
referrer: None,
};
if let Ok(chan) =
start_sending_sniffed_opt(start_chan, metadata, classifier,
&blob_buf.bytes, load_data.context.clone()) {
let _ = chan.send(Payload(blob_buf.bytes));
loop {
match recv.recv().unwrap() {
Ok(ReadFileProgress::Partial(bytes)) => {
let _ = chan.send(Payload(bytes));
}
Ok(ReadFileProgress::EOF) => {
let _ = chan.send(Done(Ok(())));
return;
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
let _ = chan.send(Done(Err(err)));
return;
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
let _ = chan.send(Done(Err(err)));
return;
}
}
}
}
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
send_error(load_data.url, err, start_chan);
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
send_error(load_data.url, err, start_chan);
}
}
} else {
let e = format!("Invalid blob URL format {:?}", load_data.url);
let format_err = NetworkError::Internal(e);
send_error(load_data.url.clone(), format_err, start_chan);
}
}
|
{
box move |load_data: LoadData, start_chan, classifier, cancel_listener| {
spawn_named(format!("blob loader for {}", load_data.url), move || {
load_blob(load_data, start_chan, classifier, filemanager, cancel_listener);
})
}
}
|
identifier_body
|
blob_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use filemanager_thread::{FileManager, UIProvider};
use hyper::header::{DispositionType, ContentDisposition, DispositionParam};
use hyper::header::{Headers, ContentType, ContentLength, Charset};
use hyper::http::RawStatus;
use hyper_serde::Serde;
use ipc_channel::ipc;
use mime::{Mime, Attr};
use mime_classifier::MimeClassifier;
use net_traits::ProgressMsg::{Payload, Done};
use net_traits::blob_url_store::parse_blob_url;
use net_traits::filemanager_thread::{FileManagerThreadMsg, SelectedFileId, ReadFileProgress};
use net_traits::response::HttpsState;
use net_traits::{LoadConsumer, LoadData, Metadata, NetworkError};
use resource_thread::CancellationListener;
use resource_thread::{start_sending_sniffed_opt, send_error};
use std::boxed::FnBox;
use std::sync::Arc;
use util::thread::spawn_named;
// TODO: Check on GET
// https://w3c.github.io/FileAPI/#requestResponseModel
pub fn factory<UI:'static + UIProvider>(filemanager: Arc<FileManager<UI>>)
-> Box<FnBox(LoadData, LoadConsumer, Arc<MimeClassifier>, CancellationListener) + Send> {
box move |load_data: LoadData, start_chan, classifier, cancel_listener| {
spawn_named(format!("blob loader for {}", load_data.url), move || {
load_blob(load_data, start_chan, classifier, filemanager, cancel_listener);
})
}
}
fn load_blob<UI:'static + UIProvider>
(load_data: LoadData, start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
filemanager: Arc<FileManager<UI>>,
cancel_listener: CancellationListener) {
let (chan, recv) = ipc::channel().unwrap();
if let Ok((id, origin, _fragment)) = parse_blob_url(&load_data.url.clone()) {
let id = SelectedFileId(id.simple().to_string());
let check_url_validity = true;
let msg = FileManagerThreadMsg::ReadFile(chan, id, check_url_validity, origin);
let _ = filemanager.handle(msg, Some(cancel_listener));
// Receive first chunk
match recv.recv().unwrap() {
Ok(ReadFileProgress::Meta(blob_buf)) => {
let content_type: Mime = blob_buf.type_string.parse().unwrap_or(mime!(Text / Plain));
let charset = content_type.get_param(Attr::Charset);
let mut headers = Headers::new();
if let Some(name) = blob_buf.filename {
let charset = charset.and_then(|c| c.as_str().parse().ok());
headers.set(ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::Filename(charset.unwrap_or(Charset::Us_Ascii),
None, name.as_bytes().to_vec())
]
});
}
headers.set(ContentType(content_type.clone()));
headers.set(ContentLength(blob_buf.size as u64));
let metadata = Metadata {
final_url: load_data.url.clone(),
content_type: Some(Serde(ContentType(content_type.clone()))),
charset: charset.map(|c| c.as_str().to_string()),
headers: Some(Serde(headers)),
// https://w3c.github.io/FileAPI/#TwoHundredOK
status: Some(Serde(RawStatus(200, "OK".into()))),
https_state: HttpsState::None,
referrer: None,
};
if let Ok(chan) =
start_sending_sniffed_opt(start_chan, metadata, classifier,
&blob_buf.bytes, load_data.context.clone()) {
let _ = chan.send(Payload(blob_buf.bytes));
loop {
match recv.recv().unwrap() {
Ok(ReadFileProgress::Partial(bytes)) => {
let _ = chan.send(Payload(bytes));
}
Ok(ReadFileProgress::EOF) => {
let _ = chan.send(Done(Ok(())));
return;
}
Ok(_) => {
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
let _ = chan.send(Done(Err(err)));
return;
}
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
let _ = chan.send(Done(Err(err)));
return;
}
}
}
}
}
Ok(_) =>
|
Err(e) => {
let err = NetworkError::Internal(format!("{:?}", e));
send_error(load_data.url, err, start_chan);
}
}
} else {
let e = format!("Invalid blob URL format {:?}", load_data.url);
let format_err = NetworkError::Internal(e);
send_error(load_data.url.clone(), format_err, start_chan);
}
}
|
{
let err = NetworkError::Internal("Invalid filemanager reply".to_string());
send_error(load_data.url, err, start_chan);
}
|
conditional_block
|
helpers.rs
|
extern crate sodiumoxide;
use sodiumoxide::crypto::asymmetricbox::{PublicKey, SecretKey, PUBLICKEYBYTES, NONCEBYTES, Nonce};
use std::vec::MutableCloneableVector;
pub trait KeyBytes {
fn key_bytes<'a>(&'a self) -> &'a [u8];
}
impl KeyBytes for PublicKey {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&PublicKey(ref bytes) => bytes.as_slice()
}
}
}
impl KeyBytes for SecretKey {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&SecretKey(ref bytes) => bytes.as_slice()
}
}
}
impl KeyBytes for Nonce {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&Nonce(ref bytes) => bytes.as_slice()
}
}
}
pub fn bytes_to_pubkey(bytes: &[u8]) -> PublicKey {
let mut buf: [u8,..PUBLICKEYBYTES] = [0,..PUBLICKEYBYTES];
buf.copy_from(bytes);
|
PublicKey(buf)
}
pub fn bytes_to_nonce(bytes: &[u8]) -> Nonce {
let mut buf: [u8,..NONCEBYTES] = [0,..NONCEBYTES];
buf.copy_from(bytes);
Nonce(buf)
}
/*pub trait GetInner<T> {
fn get_inner<'a>(&'a self) -> &'a T;
}
impl<Nonce> GetInner<Nonce> for consts::NonceWrap {
fn get_inner<'a>(&'a self) -> &'a Nonce {
match *self {
consts::NonceWrap(ref inner) => inner
}
}
}*/
|
random_line_split
|
|
helpers.rs
|
extern crate sodiumoxide;
use sodiumoxide::crypto::asymmetricbox::{PublicKey, SecretKey, PUBLICKEYBYTES, NONCEBYTES, Nonce};
use std::vec::MutableCloneableVector;
pub trait KeyBytes {
fn key_bytes<'a>(&'a self) -> &'a [u8];
}
impl KeyBytes for PublicKey {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&PublicKey(ref bytes) => bytes.as_slice()
}
}
}
impl KeyBytes for SecretKey {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&SecretKey(ref bytes) => bytes.as_slice()
}
}
}
impl KeyBytes for Nonce {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&Nonce(ref bytes) => bytes.as_slice()
}
}
}
pub fn
|
(bytes: &[u8]) -> PublicKey {
let mut buf: [u8,..PUBLICKEYBYTES] = [0,..PUBLICKEYBYTES];
buf.copy_from(bytes);
PublicKey(buf)
}
pub fn bytes_to_nonce(bytes: &[u8]) -> Nonce {
let mut buf: [u8,..NONCEBYTES] = [0,..NONCEBYTES];
buf.copy_from(bytes);
Nonce(buf)
}
/*pub trait GetInner<T> {
fn get_inner<'a>(&'a self) -> &'a T;
}
impl<Nonce> GetInner<Nonce> for consts::NonceWrap {
fn get_inner<'a>(&'a self) -> &'a Nonce {
match *self {
consts::NonceWrap(ref inner) => inner
}
}
}*/
|
bytes_to_pubkey
|
identifier_name
|
helpers.rs
|
extern crate sodiumoxide;
use sodiumoxide::crypto::asymmetricbox::{PublicKey, SecretKey, PUBLICKEYBYTES, NONCEBYTES, Nonce};
use std::vec::MutableCloneableVector;
pub trait KeyBytes {
fn key_bytes<'a>(&'a self) -> &'a [u8];
}
impl KeyBytes for PublicKey {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&PublicKey(ref bytes) => bytes.as_slice()
}
}
}
impl KeyBytes for SecretKey {
fn key_bytes<'a>(&'a self) -> &'a [u8]
|
}
impl KeyBytes for Nonce {
fn key_bytes<'a>(&'a self) -> &'a [u8] {
match self {
&Nonce(ref bytes) => bytes.as_slice()
}
}
}
pub fn bytes_to_pubkey(bytes: &[u8]) -> PublicKey {
let mut buf: [u8,..PUBLICKEYBYTES] = [0,..PUBLICKEYBYTES];
buf.copy_from(bytes);
PublicKey(buf)
}
pub fn bytes_to_nonce(bytes: &[u8]) -> Nonce {
let mut buf: [u8,..NONCEBYTES] = [0,..NONCEBYTES];
buf.copy_from(bytes);
Nonce(buf)
}
/*pub trait GetInner<T> {
fn get_inner<'a>(&'a self) -> &'a T;
}
impl<Nonce> GetInner<Nonce> for consts::NonceWrap {
fn get_inner<'a>(&'a self) -> &'a Nonce {
match *self {
consts::NonceWrap(ref inner) => inner
}
}
}*/
|
{
match self {
&SecretKey(ref bytes) => bytes.as_slice()
}
}
|
identifier_body
|
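The helpers.rs samples above use the long-removed std::vec::MutableCloneableVector::copy_from API from pre-1.0 Rust. For comparison, a rough modern-Rust sketch of the same copy-into-fixed-array step; the 32- and 24-byte constants are illustrative stand-ins for sodiumoxide's PUBLICKEYBYTES and NONCEBYTES, not the real crate definitions.

// Illustrative stand-ins for sodiumoxide's PUBLICKEYBYTES / NONCEBYTES.
const PUBLICKEYBYTES: usize = 32;
const NONCEBYTES: usize = 24;

// Copy a slice into a fixed-size array, as bytes_to_pubkey does above.
fn bytes_to_array<const N: usize>(bytes: &[u8]) -> [u8; N] {
    let mut buf = [0u8; N];
    // copy_from_slice panics on a length mismatch, so clamp defensively here.
    let n = bytes.len().min(N);
    buf[..n].copy_from_slice(&bytes[..n]);
    buf
}

fn main() {
    let key: [u8; PUBLICKEYBYTES] = bytes_to_array(&[7u8; PUBLICKEYBYTES]);
    let nonce: [u8; NONCEBYTES] = bytes_to_array(&[1u8; NONCEBYTES]);
    assert_eq!(key[0], 7);
    assert_eq!(nonce.len(), NONCEBYTES);
}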
memorydb.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and look up a hash to derive
/// the data with `get()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.insert(d);
/// assert!(!m.contains(&k));
/// m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
/// }
/// ```
#[derive(Default, Clone, PartialEq)]
pub struct MemoryDB {
data: H256FastMap<(DBValue, i32)>,
}
impl MemoryDB {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB {
MemoryDB {
data: H256FastMap::default(),
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.contains(&hash));
/// m.clear();
/// assert!(!m.contains(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn purge(&mut self)
|
/// Return the internal map of hashes to data, clearing the current state.
pub fn drain(&mut self) -> H256FastMap<(DBValue, i32)> {
mem::replace(&mut self.data, H256FastMap::default())
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> {
if key == &SHA3_NULL_RLP {
return Some((DBValue::from_slice(&NULL_RLP_STATIC), 1));
}
self.data.get(key).cloned()
}
/// Returns the size of allocated heap memory
pub fn mem_used(&self) -> usize {
self.data.heap_size_of_children()
}
/// Remove an element and delete it from storage if reference count reaches zero.
pub fn remove_and_purge(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
match self.data.entry(key.clone()) {
Entry::Occupied(mut entry) =>
if entry.get().1 == 1 {
entry.remove();
} else {
entry.get_mut().1 -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
}
}
}
/// Consolidate all the entries of `other` into `self`.
pub fn consolidate(&mut self, mut other: Self) {
for (key, (value, rc)) in other.drain() {
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
if entry.get().1 < 0 {
entry.get_mut().0 = value;
}
entry.get_mut().1 += rc;
}
Entry::Vacant(entry) => {
entry.insert((value, rc));
}
}
}
}
}
static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
impl HashDB for MemoryDB {
fn get(&self, key: &H256) -> Option<DBValue> {
if key == &SHA3_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP_STATIC));
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
}
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H256 {
if value == &NULL_RLP {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = DBValue::from_slice(value);
*rc += 1;
false
},
Some(&mut (_, ref mut x)) => { *x += 1; false },
None => true,
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::from_slice(value), 1));
}
key
}
fn emplace(&mut self, key: H256, value: DBValue) {
if &*value == &NULL_RLP {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; },
None => {},
}
//... None falls through into...
self.data.insert(key, (value, 1));
}
fn remove(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::new(), -1));
}
}
}
#[test]
fn memorydb_remove_and_purge() {
let hello_bytes = b"Hello world!";
let hello_key = hello_bytes.sha3();
let mut m = MemoryDB::new();
m.remove(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.purge();
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 0);
m.purge();
assert_eq!(m.raw(&hello_key), None);
let mut m = MemoryDB::new();
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 1);
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key), None);
}
#[test]
fn consolidate() {
let mut main = MemoryDB::new();
let mut other = MemoryDB::new();
let remove_key = other.insert(b"doggo");
main.remove(&remove_key);
let insert_key = other.insert(b"arf");
main.emplace(insert_key, DBValue::from_slice(b"arf"));
main.consolidate(other);
let overlay = main.drain();
assert_eq!(overlay.get(&remove_key).unwrap(), &(DBValue::from_slice(b"doggo"), 0));
assert_eq!(overlay.get(&insert_key).unwrap(), &(DBValue::from_slice(b"arf"), 2));
}
|
{
let empties: Vec<_> = self.data.iter()
.filter(|&(_, &(_, rc))| rc == 0)
.map(|(k, _)| k.clone())
.collect();
for empty in empties { self.data.remove(&empty); }
}
|
identifier_body
|
memorydb.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and look up a hash to derive
/// the data with `get()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.insert(d);
/// assert!(!m.contains(&k));
/// m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
/// }
/// ```
#[derive(Default, Clone, PartialEq)]
pub struct MemoryDB {
data: H256FastMap<(DBValue, i32)>,
}
impl MemoryDB {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB {
MemoryDB {
data: H256FastMap::default(),
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.contains(&hash));
/// m.clear();
/// assert!(!m.contains(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn
|
(&mut self) {
let empties: Vec<_> = self.data.iter()
.filter(|&(_, &(_, rc))| rc == 0)
.map(|(k, _)| k.clone())
.collect();
for empty in empties { self.data.remove(&empty); }
}
/// Return the internal map of hashes to data, clearing the current state.
pub fn drain(&mut self) -> H256FastMap<(DBValue, i32)> {
mem::replace(&mut self.data, H256FastMap::default())
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> {
if key == &SHA3_NULL_RLP {
return Some((DBValue::from_slice(&NULL_RLP_STATIC), 1));
}
self.data.get(key).cloned()
}
/// Returns the size of allocated heap memory
pub fn mem_used(&self) -> usize {
self.data.heap_size_of_children()
}
/// Remove an element and delete it from storage if reference count reaches zero.
pub fn remove_and_purge(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
match self.data.entry(key.clone()) {
Entry::Occupied(mut entry) =>
if entry.get().1 == 1 {
entry.remove();
} else {
entry.get_mut().1 -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
}
}
}
/// Consolidate all the entries of `other` into `self`.
pub fn consolidate(&mut self, mut other: Self) {
for (key, (value, rc)) in other.drain() {
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
if entry.get().1 < 0 {
entry.get_mut().0 = value;
}
entry.get_mut().1 += rc;
}
Entry::Vacant(entry) => {
entry.insert((value, rc));
}
}
}
}
}
static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
impl HashDB for MemoryDB {
fn get(&self, key: &H256) -> Option<DBValue> {
if key == &SHA3_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP_STATIC));
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
}
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H256 {
if value == &NULL_RLP {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = DBValue::from_slice(value);
*rc += 1;
false
},
Some(&mut (_, ref mut x)) => { *x += 1; false },
None => true,
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::from_slice(value), 1));
}
key
}
fn emplace(&mut self, key: H256, value: DBValue) {
if &*value == &NULL_RLP {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; },
None => {},
}
//... None falls through into...
self.data.insert(key, (value, 1));
}
fn remove(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::new(), -1));
}
}
}
#[test]
fn memorydb_remove_and_purge() {
let hello_bytes = b"Hello world!";
let hello_key = hello_bytes.sha3();
let mut m = MemoryDB::new();
m.remove(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.purge();
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 0);
m.purge();
assert_eq!(m.raw(&hello_key), None);
let mut m = MemoryDB::new();
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 1);
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key), None);
}
#[test]
fn consolidate() {
let mut main = MemoryDB::new();
let mut other = MemoryDB::new();
let remove_key = other.insert(b"doggo");
main.remove(&remove_key);
let insert_key = other.insert(b"arf");
main.emplace(insert_key, DBValue::from_slice(b"arf"));
main.consolidate(other);
let overlay = main.drain();
assert_eq!(overlay.get(&remove_key).unwrap(), &(DBValue::from_slice(b"doggo"), 0));
assert_eq!(overlay.get(&insert_key).unwrap(), &(DBValue::from_slice(b"arf"), 2));
}
|
purge
|
identifier_name
|
memorydb.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and look up a hash to derive
/// the data with `get()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.insert(d);
/// assert!(!m.contains(&k));
/// m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
/// }
/// ```
#[derive(Default, Clone, PartialEq)]
pub struct MemoryDB {
data: H256FastMap<(DBValue, i32)>,
}
impl MemoryDB {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB {
MemoryDB {
data: H256FastMap::default(),
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.contains(&hash));
/// m.clear();
/// assert!(!m.contains(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn purge(&mut self) {
let empties: Vec<_> = self.data.iter()
.filter(|&(_, &(_, rc))| rc == 0)
.map(|(k, _)| k.clone())
.collect();
for empty in empties { self.data.remove(&empty); }
}
/// Return the internal map of hashes to data, clearing the current state.
pub fn drain(&mut self) -> H256FastMap<(DBValue, i32)> {
mem::replace(&mut self.data, H256FastMap::default())
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> {
if key == &SHA3_NULL_RLP {
return Some((DBValue::from_slice(&NULL_RLP_STATIC), 1));
}
self.data.get(key).cloned()
}
/// Returns the size of allocated heap memory
pub fn mem_used(&self) -> usize {
self.data.heap_size_of_children()
}
/// Remove an element and delete it from storage if reference count reaches zero.
pub fn remove_and_purge(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
|
return;
}
match self.data.entry(key.clone()) {
Entry::Occupied(mut entry) =>
if entry.get().1 == 1 {
entry.remove();
} else {
entry.get_mut().1 -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
}
}
}
/// Consolidate all the entries of `other` into `self`.
pub fn consolidate(&mut self, mut other: Self) {
for (key, (value, rc)) in other.drain() {
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
if entry.get().1 < 0 {
entry.get_mut().0 = value;
}
entry.get_mut().1 += rc;
}
Entry::Vacant(entry) => {
entry.insert((value, rc));
}
}
}
}
}
static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
impl HashDB for MemoryDB {
fn get(&self, key: &H256) -> Option<DBValue> {
if key == &SHA3_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP_STATIC));
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
}
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H256 {
if value == &NULL_RLP {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = DBValue::from_slice(value);
*rc += 1;
false
},
Some(&mut (_, ref mut x)) => { *x += 1; false },
None => true,
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::from_slice(value), 1));
}
key
}
fn emplace(&mut self, key: H256, value: DBValue) {
if &*value == &NULL_RLP {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; },
None => {},
}
//... None falls through into...
self.data.insert(key, (value, 1));
}
fn remove(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::new(), -1));
}
}
}
#[test]
fn memorydb_remove_and_purge() {
let hello_bytes = b"Hello world!";
let hello_key = hello_bytes.sha3();
let mut m = MemoryDB::new();
m.remove(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.purge();
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 0);
m.purge();
assert_eq!(m.raw(&hello_key), None);
let mut m = MemoryDB::new();
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 1);
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key), None);
}
#[test]
fn consolidate() {
let mut main = MemoryDB::new();
let mut other = MemoryDB::new();
let remove_key = other.insert(b"doggo");
main.remove(&remove_key);
let insert_key = other.insert(b"arf");
main.emplace(insert_key, DBValue::from_slice(b"arf"));
main.consolidate(other);
let overlay = main.drain();
assert_eq!(overlay.get(&remove_key).unwrap(), &(DBValue::from_slice(b"doggo"), 0));
assert_eq!(overlay.get(&insert_key).unwrap(), &(DBValue::from_slice(b"arf"), 2));
}
|
random_line_split
|
|
memorydb.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use heapsize::*;
use std::mem;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and look up a hash to derive
/// the data with `get()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.insert(d);
/// assert!(!m.contains(&k));
/// m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
/// }
/// ```
#[derive(Default, Clone, PartialEq)]
pub struct MemoryDB {
data: H256FastMap<(DBValue, i32)>,
}
impl MemoryDB {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB {
MemoryDB {
data: H256FastMap::default(),
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.contains(&hash));
/// m.clear();
/// assert!(!m.contains(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn purge(&mut self) {
let empties: Vec<_> = self.data.iter()
.filter(|&(_, &(_, rc))| rc == 0)
.map(|(k, _)| k.clone())
.collect();
for empty in empties { self.data.remove(&empty); }
}
/// Return the internal map of hashes to data, clearing the current state.
pub fn drain(&mut self) -> H256FastMap<(DBValue, i32)> {
mem::replace(&mut self.data, H256FastMap::default())
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> {
if key == &SHA3_NULL_RLP {
return Some((DBValue::from_slice(&NULL_RLP_STATIC), 1));
}
self.data.get(key).cloned()
}
/// Returns the size of allocated heap memory
pub fn mem_used(&self) -> usize {
self.data.heap_size_of_children()
}
/// Remove an element and delete it from storage if reference count reaches zero.
pub fn remove_and_purge(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
match self.data.entry(key.clone()) {
Entry::Occupied(mut entry) =>
if entry.get().1 == 1 {
entry.remove();
} else {
entry.get_mut().1 -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
}
}
}
/// Consolidate all the entries of `other` into `self`.
pub fn consolidate(&mut self, mut other: Self) {
for (key, (value, rc)) in other.drain() {
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
if entry.get().1 < 0 {
entry.get_mut().0 = value;
}
entry.get_mut().1 += rc;
}
Entry::Vacant(entry) => {
entry.insert((value, rc));
}
}
}
}
}
static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
impl HashDB for MemoryDB {
fn get(&self, key: &H256) -> Option<DBValue> {
if key == &SHA3_NULL_RLP {
return Some(DBValue::from_slice(&NULL_RLP_STATIC));
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
}
fn contains(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H256 {
if value == &NULL_RLP {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = DBValue::from_slice(value);
*rc += 1;
false
},
Some(&mut (_, ref mut x)) =>
|
,
None => true,
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::from_slice(value), 1));
}
key
}
fn emplace(&mut self, key: H256, value: DBValue) {
if &*value == &NULL_RLP {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; },
None => {},
}
//... None falls through into...
self.data.insert(key, (value, 1));
}
fn remove(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ //... None falls through into...
self.data.insert(key.clone(), (DBValue::new(), -1));
}
}
}
#[test]
fn memorydb_remove_and_purge() {
let hello_bytes = b"Hello world!";
let hello_key = hello_bytes.sha3();
let mut m = MemoryDB::new();
m.remove(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.purge();
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 0);
m.purge();
assert_eq!(m.raw(&hello_key), None);
let mut m = MemoryDB::new();
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 1);
m.remove_and_purge(&hello_key);
assert_eq!(m.raw(&hello_key), None);
}
#[test]
fn consolidate() {
let mut main = MemoryDB::new();
let mut other = MemoryDB::new();
let remove_key = other.insert(b"doggo");
main.remove(&remove_key);
let insert_key = other.insert(b"arf");
main.emplace(insert_key, DBValue::from_slice(b"arf"));
main.consolidate(other);
let overlay = main.drain();
assert_eq!(overlay.get(&remove_key).unwrap(), &(DBValue::from_slice(b"doggo"), 0));
assert_eq!(overlay.get(&insert_key).unwrap(), &(DBValue::from_slice(b"arf"), 2));
}
|
{ *x += 1; false }
|
conditional_block
|
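To make the reference-counting behaviour in the memorydb.rs samples easier to follow without the ethcore_util types, here is a std-only sketch of the same idea — values carrying a signed refcount that may legitimately go negative, and a purge that drops only zero-referenced entries. The String key stands in for H256 and nothing is actually hashed; this is an illustration, not the Parity implementation.

use std::collections::HashMap;

// Minimal stand-in for MemoryDB: value plus signed reference count.
struct TinyDB { data: HashMap<String, (Vec<u8>, i32)> }

impl TinyDB {
    fn new() -> Self { TinyDB { data: HashMap::new() } }
    // insert bumps the refcount, creating the entry if absent.
    fn insert(&mut self, key: &str, value: &[u8]) {
        let e = self.data.entry(key.to_string()).or_insert((Vec::new(), 0));
        e.0 = value.to_vec();
        e.1 += 1;
    }
    // remove decrements, possibly below zero, like MemoryDB::remove above.
    fn remove(&mut self, key: &str) {
        self.data.entry(key.to_string()).or_insert((Vec::new(), 0)).1 -= 1;
    }
    // purge drops only the zero-referenced entries, like MemoryDB::purge.
    fn purge(&mut self) {
        self.data.retain(|_, &mut (_, rc)| rc != 0);
    }
    fn contains(&self, key: &str) -> bool {
        self.data.get(key).map_or(false, |&(_, rc)| rc > 0)
    }
}

fn main() {
    let mut db = TinyDB::new();
    db.insert("k", b"hello");
    db.remove("k");          // refcount back to 0
    db.purge();              // entry disappears
    assert!(!db.contains("k"));
}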
lib.rs
|
// Copyright (C) 2016 ParadoxSpiral
//
// This file is part of mpv-sys.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
|
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
#[inline]
/// Returns the associated error string.
pub fn mpv_error_str(e: mpv_error) -> &'static str {
let raw = unsafe { mpv_error_string(e) };
unsafe { ::std::ffi::CStr::from_ptr(raw) }.to_str().unwrap()
}
|
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
|
random_line_split
|
lib.rs
|
// Copyright (C) 2016 ParadoxSpiral
//
// This file is part of mpv-sys.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
#[inline]
/// Returns the associated error string.
pub fn mpv_error_str(e: mpv_error) -> &'static str
|
{
let raw = unsafe { mpv_error_string(e) };
unsafe { ::std::ffi::CStr::from_ptr(raw) }.to_str().unwrap()
}
|
identifier_body
|
|
lib.rs
|
// Copyright (C) 2016 ParadoxSpiral
//
// This file is part of mpv-sys.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
#[inline]
/// Returns the associated error string.
pub fn
|
(e: mpv_error) -> &'static str {
let raw = unsafe { mpv_error_string(e) };
unsafe { ::std::ffi::CStr::from_ptr(raw) }.to_str().unwrap()
}
|
mpv_error_str
|
identifier_name
|
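The mpv_error_str samples above hinge on the unsafe CStr::from_ptr conversion. A self-contained sketch of that C-string-to-&str step follows, using a CString built (and deliberately leaked) in Rust as a stand-in for the static string libmpv's mpv_error_string would return.

use std::ffi::{CStr, CString};
use std::os::raw::c_char;

// Stand-in for a C function returning a static NUL-terminated string.
fn fake_error_string() -> *const c_char {
    // Leaking keeps the pointer valid for the rest of the program,
    // mimicking a string with static lifetime on the C side.
    CString::new("generic error").unwrap().into_raw() as *const c_char
}

fn error_str(raw: *const c_char) -> &'static str {
    unsafe { CStr::from_ptr(raw) }.to_str().unwrap()
}

fn main() {
    let msg = error_str(fake_error_string());
    assert_eq!(msg, "generic error");
}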
resource-generic.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct
|
<T> {val: T, fin: extern fn(T)}
struct finish<T> {
arg: Arg<T>
}
#[unsafe_destructor]
impl<T:Copy> Drop for finish<T> {
fn drop(&self) {
unsafe {
(self.arg.fin)(copy self.arg.val);
}
}
}
fn finish<T:Copy>(arg: Arg<T>) -> finish<T> {
finish {
arg: arg
}
}
pub fn main() {
let box = @mut 10;
fn dec_box(i: @mut int) { *i -= 1; }
{ let _i = finish(Arg{val: box, fin: dec_box}); }
assert_eq!(*box, 9);
}
|
Arg
|
identifier_name
|
resource-generic.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
struct Arg<T> {val: T, fin: extern fn(T)}
struct finish<T> {
arg: Arg<T>
}
#[unsafe_destructor]
impl<T:Copy> Drop for finish<T> {
fn drop(&self) {
unsafe {
(self.arg.fin)(copy self.arg.val);
}
}
}
fn finish<T:Copy>(arg: Arg<T>) -> finish<T> {
finish {
arg: arg
}
}
|
{ let _i = finish(Arg{val: box, fin: dec_box}); }
assert_eq!(*box, 9);
}
|
pub fn main() {
let box = @mut 10;
fn dec_box(i: @mut int) { *i -= 1; }
|
random_line_split
|
util.rs
|
use std::path::{Path, PathBuf};
use std::collections::BTreeMap;
use unshare::{Command};
use crate::config::Container;
use crate::config::command::Run;
pub fn find_cmd(cmd: &str, env: &BTreeMap<String, String>)
-> Result<PathBuf, String>
{
if cmd.contains("/") {
return Ok(PathBuf::from(cmd));
} else {
if let Some(paths) = env.get(&"PATH".to_string()) {
for dir in paths[..].split(':') {
let path = Path::new(dir);
if !path.is_absolute() {
warn!("All items in PATH must be absolute, not {:?}",
path);
continue;
}
let path = path.join(cmd);
if path.exists() {
return Ok(path);
}
}
return Err(format!("Command {} not found in {:?}",
cmd, paths));
} else {
return Err(format!("Command {} is not absolute and no PATH set",
cmd));
}
}
}
pub fn warn_if_data_container(container_config: &Container)
|
pub fn gen_command(default_shell: &Vec<String>, cmdline: &Run,
env: &BTreeMap<String, String>)
-> Result<Command, String>
{
match *cmdline {
Run::Shell(ref data) => {
if default_shell.len() > 0 {
let mut cmd = Command::new(&default_shell[0]);
for arg in &default_shell[1..] {
if arg == "$cmdline" {
cmd.arg(data);
} else {
cmd.arg(arg);
}
}
return Ok(cmd);
} else {
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c");
cmd.arg(data);
cmd.arg("--");
return Ok(cmd);
}
}
Run::Command(ref cmdline) => {
let cpath = find_cmd(&cmdline[0], &env)?;
let mut cmd = Command::new(&cpath);
cmd.args(&cmdline[1..]);
return Ok(cmd);
}
}
}
|
{
if container_config.is_data_container() {
warn!("You are trying to run command inside the data container. \
Data containers is designed to use as volumes inside other \
containers. Usually there are no system dirs at all.");
}
}
|
identifier_body
|
util.rs
|
use std::path::{Path, PathBuf};
use std::collections::BTreeMap;
use unshare::{Command};
use crate::config::Container;
use crate::config::command::Run;
pub fn
|
(cmd: &str, env: &BTreeMap<String, String>)
-> Result<PathBuf, String>
{
if cmd.contains("/") {
return Ok(PathBuf::from(cmd));
} else {
if let Some(paths) = env.get(&"PATH".to_string()) {
for dir in paths[..].split(':') {
let path = Path::new(dir);
if !path.is_absolute() {
warn!("All items in PATH must be absolute, not {:?}",
path);
continue;
}
let path = path.join(cmd);
if path.exists() {
return Ok(path);
}
}
return Err(format!("Command {} not found in {:?}",
cmd, paths));
} else {
return Err(format!("Command {} is not absolute and no PATH set",
cmd));
}
}
}
pub fn warn_if_data_container(container_config: &Container) {
if container_config.is_data_container() {
warn!("You are trying to run command inside the data container. \
Data containers is designed to use as volumes inside other \
containers. Usually there are no system dirs at all.");
}
}
pub fn gen_command(default_shell: &Vec<String>, cmdline: &Run,
env: &BTreeMap<String, String>)
-> Result<Command, String>
{
match *cmdline {
Run::Shell(ref data) => {
if default_shell.len() > 0 {
let mut cmd = Command::new(&default_shell[0]);
for arg in &default_shell[1..] {
if arg == "$cmdline" {
cmd.arg(data);
} else {
cmd.arg(arg);
}
}
return Ok(cmd);
} else {
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c");
cmd.arg(data);
cmd.arg("--");
return Ok(cmd);
}
}
Run::Command(ref cmdline) => {
let cpath = find_cmd(&cmdline[0], &env)?;
let mut cmd = Command::new(&cpath);
cmd.args(&cmdline[1..]);
return Ok(cmd);
}
}
}
|
find_cmd
|
identifier_name
|
util.rs
|
use std::path::{Path, PathBuf};
use std::collections::BTreeMap;
use unshare::{Command};
use crate::config::Container;
use crate::config::command::Run;
pub fn find_cmd(cmd: &str, env: &BTreeMap<String, String>)
-> Result<PathBuf, String>
{
if cmd.contains("/") {
return Ok(PathBuf::from(cmd));
} else {
if let Some(paths) = env.get(&"PATH".to_string()) {
for dir in paths[..].split(':') {
let path = Path::new(dir);
if !path.is_absolute() {
warn!("All items in PATH must be absolute, not {:?}",
path);
continue;
}
let path = path.join(cmd);
if path.exists() {
return Ok(path);
}
}
return Err(format!("Command {} not found in {:?}",
cmd, paths));
} else {
return Err(format!("Command {} is not absolute and no PATH set",
cmd));
}
}
}
pub fn warn_if_data_container(container_config: &Container) {
if container_config.is_data_container() {
warn!("You are trying to run command inside the data container. \
Data containers is designed to use as volumes inside other \
containers. Usually there are no system dirs at all.");
}
}
pub fn gen_command(default_shell: &Vec<String>, cmdline: &Run,
env: &BTreeMap<String, String>)
-> Result<Command, String>
{
match *cmdline {
Run::Shell(ref data) => {
if default_shell.len() > 0 {
let mut cmd = Command::new(&default_shell[0]);
for arg in &default_shell[1..] {
if arg == "$cmdline" {
cmd.arg(data);
} else {
cmd.arg(arg);
}
}
return Ok(cmd);
} else
|
}
Run::Command(ref cmdline) => {
let cpath = find_cmd(&cmdline[0], &env)?;
let mut cmd = Command::new(&cpath);
cmd.args(&cmdline[1..]);
return Ok(cmd);
}
}
}
|
{
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c");
cmd.arg(data);
cmd.arg("--");
return Ok(cmd);
}
|
conditional_block
|
util.rs
|
use std::path::{Path, PathBuf};
use std::collections::BTreeMap;
use unshare::{Command};
use crate::config::Container;
use crate::config::command::Run;
pub fn find_cmd(cmd: &str, env: &BTreeMap<String, String>)
-> Result<PathBuf, String>
{
if cmd.contains("/") {
return Ok(PathBuf::from(cmd));
} else {
if let Some(paths) = env.get(&"PATH".to_string()) {
for dir in paths[..].split(':') {
let path = Path::new(dir);
if !path.is_absolute() {
warn!("All items in PATH must be absolute, not {:?}",
path);
continue;
}
let path = path.join(cmd);
if path.exists() {
return Ok(path);
}
}
return Err(format!("Command {} not found in {:?}",
cmd, paths));
} else {
return Err(format!("Command {} is not absolute and no PATH set",
cmd));
}
}
}
pub fn warn_if_data_container(container_config: &Container) {
|
containers. Usually there are no system dirs at all.");
}
}
pub fn gen_command(default_shell: &Vec<String>, cmdline: &Run,
env: &BTreeMap<String, String>)
-> Result<Command, String>
{
match *cmdline {
Run::Shell(ref data) => {
if default_shell.len() > 0 {
let mut cmd = Command::new(&default_shell[0]);
for arg in &default_shell[1..] {
if arg == "$cmdline" {
cmd.arg(data);
} else {
cmd.arg(arg);
}
}
return Ok(cmd);
} else {
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c");
cmd.arg(data);
cmd.arg("--");
return Ok(cmd);
}
}
Run::Command(ref cmdline) => {
let cpath = find_cmd(&cmdline[0], &env)?;
let mut cmd = Command::new(&cpath);
cmd.args(&cmdline[1..]);
return Ok(cmd);
}
}
}
|
if container_config.is_data_container() {
warn!("You are trying to run command inside the data container. \
Data containers is designed to use as volumes inside other \
|
random_line_split
|
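The find_cmd samples above walk PATH by hand with split(':'). For comparison, a std-only sketch of the same lookup via std::env::split_paths, with the error type reduced to String as in the originals; this is an alternative illustration, not the vagga code.

use std::collections::BTreeMap;
use std::path::PathBuf;

// PATH lookup in the spirit of find_cmd above, but via std::env::split_paths.
fn find_cmd(cmd: &str, env: &BTreeMap<String, String>) -> Result<PathBuf, String> {
    if cmd.contains('/') {
        return Ok(PathBuf::from(cmd));
    }
    let paths = env.get("PATH")
        .ok_or_else(|| format!("Command {} is not absolute and no PATH set", cmd))?;
    for dir in std::env::split_paths(paths) {
        if !dir.is_absolute() {
            continue; // the original warns here; a sketch can just skip
        }
        let candidate = dir.join(cmd);
        if candidate.exists() {
            return Ok(candidate);
        }
    }
    Err(format!("Command {} not found in {:?}", cmd, paths))
}

fn main() {
    let mut env = BTreeMap::new();
    env.insert("PATH".to_string(), "/usr/bin:/bin".to_string());
    println!("{:?}", find_cmd("sh", &env));
}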
debug_output.rs
|
use flowgger::config::Config;
use flowgger::merger::Merger;
use std::io::{stdout, Write};
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::thread;
use super::Output;
pub struct DebugOutput;
impl DebugOutput {
pub fn new(_config: &Config) -> DebugOutput {
DebugOutput
}
}
impl Output for DebugOutput {
fn
|
(&self, arx: Arc<Mutex<Receiver<Vec<u8>>>>, merger: Option<Box<Merger>>) {
let merger = match merger {
Some(merger) => Some(merger.clone_boxed()),
None => None,
};
thread::spawn(move || {
loop {
let mut bytes = match {
arx.lock().unwrap().recv()
} {
Ok(line) => line,
Err(_) => return,
};
if let Some(ref merger) = merger {
merger.frame(&mut bytes);
}
let out = String::from_utf8_lossy(&bytes);
print!("{}", out);
let _ = stdout().flush();
}
});
}
}
|
start
|
identifier_name
|
debug_output.rs
|
use flowgger::config::Config;
use flowgger::merger::Merger;
use std::io::{stdout, Write};
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::thread;
use super::Output;
pub struct DebugOutput;
impl DebugOutput {
pub fn new(_config: &Config) -> DebugOutput {
DebugOutput
}
}
impl Output for DebugOutput {
fn start(&self, arx: Arc<Mutex<Receiver<Vec<u8>>>>, merger: Option<Box<Merger>>) {
let merger = match merger {
Some(merger) => Some(merger.clone_boxed()),
None => None,
};
thread::spawn(move || {
loop {
let mut bytes = match {
arx.lock().unwrap().recv()
} {
Ok(line) => line,
Err(_) => return,
};
if let Some(ref merger) = merger {
|
let _ = stdout().flush();
}
});
}
}
|
merger.frame(&mut bytes);
}
let out = String::from_utf8_lossy(&bytes);
print!("{}", out);
|
random_line_split
|
debug_output.rs
|
use flowgger::config::Config;
use flowgger::merger::Merger;
use std::io::{stdout, Write};
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::thread;
use super::Output;
pub struct DebugOutput;
impl DebugOutput {
pub fn new(_config: &Config) -> DebugOutput
|
}
impl Output for DebugOutput {
fn start(&self, arx: Arc<Mutex<Receiver<Vec<u8>>>>, merger: Option<Box<Merger>>) {
let merger = match merger {
Some(merger) => Some(merger.clone_boxed()),
None => None,
};
thread::spawn(move || {
loop {
let mut bytes = match {
arx.lock().unwrap().recv()
} {
Ok(line) => line,
Err(_) => return,
};
if let Some(ref merger) = merger {
merger.frame(&mut bytes);
}
let out = String::from_utf8_lossy(&bytes);
print!("{}", out);
let _ = stdout().flush();
}
});
}
}
|
{
DebugOutput
}
|
identifier_body
|
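The DebugOutput samples above share one Receiver behind Arc<Mutex<_>> and drain it on a spawned thread. A trimmed-down, std-only sketch of that consumer pattern, without the flowgger-specific Merger framing:

use std::io::{stdout, Write};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread;

// Spawn a consumer that locks the shared receiver for each message,
// mirroring DebugOutput::start above.
fn start(arx: Arc<Mutex<Receiver<Vec<u8>>>>) -> thread::JoinHandle<()> {
    thread::spawn(move || loop {
        let bytes = match arx.lock().unwrap().recv() {
            Ok(line) => line,
            Err(_) => return, // all senders gone
        };
        print!("{}", String::from_utf8_lossy(&bytes));
        let _ = stdout().flush();
    })
}

fn main() {
    let (tx, rx) = channel();
    let handle = start(Arc::new(Mutex::new(rx)));
    tx.send(b"hello\n".to_vec()).unwrap();
    drop(tx); // closes the channel so the consumer thread exits
    handle.join().unwrap();
}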
extern-crosscrate.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
//aux-build:extern-crosscrate-source.rs
extern mod externcallback(vers = "0.1");
#[fixed_stack_segment] #[inline(never)]
fn fact(n: uint) -> uint
|
pub fn main() {
let result = fact(10u);
info2!("result = {}", result);
assert_eq!(result, 3628800u);
}
|
{
unsafe {
info2!("n = {}", n);
externcallback::rustrt::rust_dbg_call(externcallback::cb, n)
}
}
|
identifier_body
|
extern-crosscrate.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
//aux-build:extern-crosscrate-source.rs
extern mod externcallback(vers = "0.1");
#[fixed_stack_segment] #[inline(never)]
fn fact(n: uint) -> uint {
unsafe {
info2!("n = {}", n);
externcallback::rustrt::rust_dbg_call(externcallback::cb, n)
}
}
pub fn main() {
let result = fact(10u);
|
info2!("result = {}", result);
assert_eq!(result, 3628800u);
}
|
random_line_split
|
|
extern-crosscrate.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
//aux-build:extern-crosscrate-source.rs
extern mod externcallback(vers = "0.1");
#[fixed_stack_segment] #[inline(never)]
fn
|
(n: uint) -> uint {
unsafe {
info2!("n = {}", n);
externcallback::rustrt::rust_dbg_call(externcallback::cb, n)
}
}
pub fn main() {
let result = fact(10u);
info2!("result = {}", result);
assert_eq!(result, 3628800u);
}
|
fact
|
identifier_name
|
line_reader.rs
|
use std::io::{self, Read};
use std::str::{self, Utf8Error};
#[derive(Debug)]
struct ReaderBuf {
vec: Vec<u8>,
msg_start: usize,
write_loc: usize
}
impl ReaderBuf {
pub fn new(buffer_size: usize) -> ReaderBuf {
let mut vec = Vec::with_capacity(buffer_size);
unsafe { vec.set_len(buffer_size); }
ReaderBuf {
vec: vec,
msg_start: 0,
write_loc: 0
}
}
/// Take any unread bytes and move them to the beginning of the buffer.
/// Reset the msg_start to 0 and write_loc to the length of the unread bytes.
/// This is useful when there is an incomplete message, and we want to continue
/// reading new bytes so that we can eventually complete a message. Note that while
/// this is somewhat inefficient, most decoders expect contiguous slices, so we can't
/// use a ring buffer without some copying anyway. Additionally, this lets us re-use
/// the buffer without an allocation, and is a simpler implementation.
///
/// TODO: This can almost certainly be made faster with unsafe code.
pub fn reset(&mut self) {
let mut write_index = 0;
for i in self.msg_start..self.write_loc {
self.vec[write_index] = self.vec[i];
write_index = write_index + 1;
}
self.write_loc = write_index;
self.msg_start = 0;
}
pub fn is_empty(&self) -> bool {
self.write_loc == 0
}
}
/// An iterator over lines available from the current spot in the buffer
pub struct Iter<'a> {
buf: &'a mut ReaderBuf
}
impl<'a> Iterator for Iter<'a> {
type Item = Result<String, Utf8Error>;
fn next(&mut self) -> Option<Result<String, Utf8Error>> {
if self.buf.msg_start == self.buf.write_loc {
self.buf.reset();
return None;
}
let slice = &self.buf.vec[self.buf.msg_start..self.buf.write_loc];
match slice.iter().position(|&c| c == '\n' as u8) {
Some(index) => {
self.buf.msg_start = self.buf.msg_start + index + 1;
Some(str::from_utf8(&slice[0..index+1]).map(|s| s.to_string()))
},
None => None
}
}
}
/// Read and compose lines of text
#[derive(Debug)]
pub struct LineReader {
buf: ReaderBuf
}
impl LineReader {
pub fn new(buffer_size: usize) -> LineReader {
LineReader {
buf: ReaderBuf::new(buffer_size)
}
}
pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> {
let bytes_read = try!(reader.read(&mut self.buf.vec[self.buf.write_loc..]));
self.buf.write_loc += bytes_read;
Ok(bytes_read)
}
pub fn iter_mut(&mut self) -> Iter {
Iter {
buf: &mut self.buf
}
}
pub fn is_empty(&self) -> bool {
self.buf.is_empty()
}
}
|
const TEXT: &'static str = "hello\nworld\nhow's\nit\ngoing?\n";
#[test]
fn static_buffer_single_read() {
let mut data = Cursor::new(TEXT);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(TEXT.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
#[test]
fn static_buffer_partial_read_follow_by_complete_read() {
let mut string = TEXT.to_string();
string.push_str("ok");
let mut data = Cursor::new(&string);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(string.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(false, line_reader.is_empty());
assert_eq!(1, line_reader.read(&mut Cursor::new("\n")).unwrap());
assert_eq!("ok\n".to_string(), line_reader.iter_mut().next().unwrap().unwrap());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
}
|
#[cfg(test)]
mod tests {
use std::io::Cursor;
use super::LineReader;
|
random_line_split
|
line_reader.rs
|
use std::io::{self, Read};
use std::str::{self, Utf8Error};
#[derive(Debug)]
struct ReaderBuf {
vec: Vec<u8>,
msg_start: usize,
write_loc: usize
}
impl ReaderBuf {
pub fn new(buffer_size: usize) -> ReaderBuf {
let mut vec = Vec::with_capacity(buffer_size);
unsafe { vec.set_len(buffer_size); }
ReaderBuf {
vec: vec,
msg_start: 0,
write_loc: 0
}
}
/// Take any unread bytes and move them to the beginning of the buffer.
/// Reset the msg_start to 0 and write_loc to the length of the unread bytes.
/// This is useful when there is an incomplete message, and we want to continue
/// reading new bytes so that we can eventually complete a message. Note that while
/// this is somewhat inefficient, most decoders expect contiguous slices, so we can't
/// use a ring buffer without some copying anyway. Additionally, this lets us re-use
/// the buffer without an allocation, and is a simpler implementation.
///
/// TODO: This can almost certainly be made faster with unsafe code.
pub fn reset(&mut self) {
let mut write_index = 0;
for i in self.msg_start..self.write_loc {
self.vec[write_index] = self.vec[i];
write_index = write_index + 1;
}
self.write_loc = write_index;
self.msg_start = 0;
}
pub fn is_empty(&self) -> bool {
self.write_loc == 0
}
}
/// An iterator over lines available from the current spot in the buffer
pub struct Iter<'a> {
buf: &'a mut ReaderBuf
}
impl<'a> Iterator for Iter<'a> {
type Item = Result<String, Utf8Error>;
fn next(&mut self) -> Option<Result<String, Utf8Error>> {
if self.buf.msg_start == self.buf.write_loc {
self.buf.reset();
return None;
}
let slice = &self.buf.vec[self.buf.msg_start..self.buf.write_loc];
match slice.iter().position(|&c| c == '\n' as u8) {
Some(index) => {
self.buf.msg_start = self.buf.msg_start + index + 1;
Some(str::from_utf8(&slice[0..index+1]).map(|s| s.to_string()))
},
None => None
}
}
}
/// Read and compose lines of text
#[derive(Debug)]
pub struct LineReader {
buf: ReaderBuf
}
impl LineReader {
pub fn new(buffer_size: usize) -> LineReader {
LineReader {
buf: ReaderBuf::new(buffer_size)
}
}
pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> {
let bytes_read = try!(reader.read(&mut self.buf.vec[self.buf.write_loc..]));
self.buf.write_loc += bytes_read;
Ok(bytes_read)
}
pub fn iter_mut(&mut self) -> Iter {
Iter {
buf: &mut self.buf
}
}
pub fn is_empty(&self) -> bool {
self.buf.is_empty()
}
}
#[cfg(test)]
mod tests {
use std::io::Cursor;
use super::LineReader;
const TEXT: &'static str = "hello\nworld\nhow's\nit\ngoing?\n";
#[test]
fn static_buffer_single_read()
|
#[test]
fn static_buffer_partial_read_follow_by_complete_read() {
let mut string = TEXT.to_string();
string.push_str("ok");
let mut data = Cursor::new(&string);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(string.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(false, line_reader.is_empty());
assert_eq!(1, line_reader.read(&mut Cursor::new("\n")).unwrap());
assert_eq!("ok\n".to_string(), line_reader.iter_mut().next().unwrap().unwrap());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
}
|
{
let mut data = Cursor::new(TEXT);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(TEXT.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
|
identifier_body
|
line_reader.rs
|
use std::io::{self, Read};
use std::str::{self, Utf8Error};
#[derive(Debug)]
struct ReaderBuf {
vec: Vec<u8>,
msg_start: usize,
write_loc: usize
}
impl ReaderBuf {
pub fn new(buffer_size: usize) -> ReaderBuf {
let mut vec = Vec::with_capacity(buffer_size);
unsafe { vec.set_len(buffer_size); }
ReaderBuf {
vec: vec,
msg_start: 0,
write_loc: 0
}
}
/// Take any unread bytes and move them to the beginning of the buffer.
/// Reset the msg_start to 0 and write_loc to the length of the unread bytes.
/// This is useful when there is an incomplete message, and we want to continue
/// reading new bytes so that we can eventually complete a message. Note that while
/// this is somewhat inefficient, most decoders expect contiguous slices, so we can't
/// use a ring buffer without some copying anyway. Additionally, this lets us re-use
/// the buffer without an allocation, and is a simpler implementation.
///
/// TODO: This can almost certainly be made faster with unsafe code.
pub fn reset(&mut self) {
let mut write_index = 0;
for i in self.msg_start..self.write_loc {
self.vec[write_index] = self.vec[i];
write_index = write_index + 1;
}
self.write_loc = write_index;
self.msg_start = 0;
}
pub fn is_empty(&self) -> bool {
self.write_loc == 0
}
}
/// An iterator over lines available from the current spot in the buffer
pub struct Iter<'a> {
buf: &'a mut ReaderBuf
}
impl<'a> Iterator for Iter<'a> {
type Item = Result<String, Utf8Error>;
fn next(&mut self) -> Option<Result<String, Utf8Error>> {
if self.buf.msg_start == self.buf.write_loc
|
let slice = &self.buf.vec[self.buf.msg_start..self.buf.write_loc];
match slice.iter().position(|&c| c == '\n' as u8) {
Some(index) => {
self.buf.msg_start = self.buf.msg_start + index + 1;
Some(str::from_utf8(&slice[0..index+1]).map(|s| s.to_string()))
},
None => None
}
}
}
/// Read and compose lines of text
#[derive(Debug)]
pub struct LineReader {
buf: ReaderBuf
}
impl LineReader {
pub fn new(buffer_size: usize) -> LineReader {
LineReader {
buf: ReaderBuf::new(buffer_size)
}
}
pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> {
let bytes_read = try!(reader.read(&mut self.buf.vec[self.buf.write_loc..]));
self.buf.write_loc += bytes_read;
Ok(bytes_read)
}
pub fn iter_mut(&mut self) -> Iter {
Iter {
buf: &mut self.buf
}
}
pub fn is_empty(&self) -> bool {
self.buf.is_empty()
}
}
#[cfg(test)]
mod tests {
use std::io::Cursor;
use super::LineReader;
const TEXT: &'static str = "hello\nworld\nhow's\nit\ngoing?\n";
#[test]
fn static_buffer_single_read() {
let mut data = Cursor::new(TEXT);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(TEXT.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
#[test]
fn static_buffer_partial_read_follow_by_complete_read() {
let mut string = TEXT.to_string();
string.push_str("ok");
let mut data = Cursor::new(&string);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(string.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(false, line_reader.is_empty());
assert_eq!(1, line_reader.read(&mut Cursor::new("\n")).unwrap());
assert_eq!("ok\n".to_string(), line_reader.iter_mut().next().unwrap().unwrap());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
}
|
{
self.buf.reset();
return None;
}
|
conditional_block
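A minimal usage sketch for the LineReader shown above, mirroring its tests (illustrative only; it assumes the module above is available in the crate as `line_reader`). It demonstrates the behaviour the `reset` comment describes: a line without a trailing newline stays buffered until a later read completes it.

use std::io::Cursor;
use line_reader::LineReader;

fn main() {
    // Input that ends mid-line: "tail" has no trailing newline yet.
    let mut input = Cursor::new("head\ntail");
    let mut reader = LineReader::new(64);

    // First read pulls everything currently available into the buffer.
    reader.read(&mut input).unwrap();
    for line in reader.iter_mut() {
        // Yields "head\n"; "tail" stays buffered because it is incomplete.
        println!("got {:?}", line.unwrap());
    }

    // A later read supplies the missing newline and completes the line.
    reader.read(&mut Cursor::new("\n")).unwrap();
    for line in reader.iter_mut() {
        println!("got {:?}", line.unwrap()); // "tail\n"
    }
    assert!(reader.is_empty());
}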
|
line_reader.rs
|
use std::io::{self, Read};
use std::str::{self, Utf8Error};
#[derive(Debug)]
struct ReaderBuf {
vec: Vec<u8>,
msg_start: usize,
write_loc: usize
}
impl ReaderBuf {
pub fn new(buffer_size: usize) -> ReaderBuf {
let mut vec = Vec::with_capacity(buffer_size);
unsafe { vec.set_len(buffer_size); }
ReaderBuf {
vec: vec,
msg_start: 0,
write_loc: 0
}
}
/// Take any unread bytes and move them to the beginning of the buffer.
    /// Reset the msg_start to 0 and write_loc to the length of those unread bytes.
    /// This is useful when there is an incomplete message, and we want to continue
/// reading new bytes so that we can eventually complete a message. Note that while
/// this is somewhat inefficient, most decoders expect contiguous slices, so we can't
/// use a ring buffer without some copying anyway. Additionally, this lets us re-use
/// the buffer without an allocation, and is a simpler implementation.
///
/// TODO: This can almost certainly be made faster with unsafe code.
pub fn reset(&mut self) {
let mut write_index = 0;
for i in self.msg_start..self.write_loc {
self.vec[write_index] = self.vec[i];
write_index = write_index + 1;
}
self.write_loc = write_index;
self.msg_start = 0;
}
pub fn
|
(&self) -> bool {
self.write_loc == 0
}
}
/// An iterator over lines available from the current spot in the buffer
pub struct Iter<'a> {
buf: &'a mut ReaderBuf
}
impl<'a> Iterator for Iter<'a> {
type Item = Result<String, Utf8Error>;
fn next(&mut self) -> Option<Result<String, Utf8Error>> {
if self.buf.msg_start == self.buf.write_loc {
self.buf.reset();
return None;
}
let slice = &self.buf.vec[self.buf.msg_start..self.buf.write_loc];
match slice.iter().position(|&c| c == '\n' as u8) {
Some(index) => {
self.buf.msg_start = self.buf.msg_start + index + 1;
Some(str::from_utf8(&slice[0..index+1]).map(|s| s.to_string()))
},
None => None
}
}
}
/// Read and compose lines of text
#[derive(Debug)]
pub struct LineReader {
buf: ReaderBuf
}
impl LineReader {
pub fn new(buffer_size: usize) -> LineReader {
LineReader {
buf: ReaderBuf::new(buffer_size)
}
}
pub fn read<T: Read>(&mut self, reader: &mut T) -> io::Result<usize> {
let bytes_read = try!(reader.read(&mut self.buf.vec[self.buf.write_loc..]));
self.buf.write_loc += bytes_read;
Ok(bytes_read)
}
pub fn iter_mut(&mut self) -> Iter {
Iter {
buf: &mut self.buf
}
}
pub fn is_empty(&self) -> bool {
self.buf.is_empty()
}
}
#[cfg(test)]
mod tests {
use std::io::Cursor;
use super::LineReader;
const TEXT: &'static str = "hello\nworld\nhow's\nit\ngoing?\n";
#[test]
fn static_buffer_single_read() {
let mut data = Cursor::new(TEXT);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(TEXT.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
#[test]
fn static_buffer_partial_read_follow_by_complete_read() {
let mut string = TEXT.to_string();
string.push_str("ok");
let mut data = Cursor::new(&string);
let mut line_reader = LineReader::new(1024);
let bytes_read = line_reader.read(&mut data).unwrap();
assert_eq!(false, line_reader.is_empty());
assert_eq!(string.len(), bytes_read);
assert_eq!(5, line_reader.iter_mut().count());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(false, line_reader.is_empty());
assert_eq!(1, line_reader.read(&mut Cursor::new("\n")).unwrap());
assert_eq!("ok\n".to_string(), line_reader.iter_mut().next().unwrap().unwrap());
assert_eq!(None, line_reader.iter_mut().next());
assert_eq!(true, line_reader.is_empty());
}
}
|
is_empty
|
identifier_name
|
pipe-sleep.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::recv;
proto! oneshot (
waiting:send {
        signal -> !
}
)
pub fn main()
|
{
use oneshot::client::*;
let c = pipes::spawn_service(oneshot::init, |p| { recv(p); });
let iotask = &uv::global_loop::get();
sleep(iotask, 500);
signal(c);
}
|
identifier_body
|
|
pipe-sleep.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::recv;
proto! oneshot (
waiting:send {
        signal -> !
}
)
pub fn main() {
use oneshot::client::*;
let c = pipes::spawn_service(oneshot::init, |p| { recv(p); });
|
signal(c);
}
|
let iotask = &uv::global_loop::get();
sleep(iotask, 500);
|
random_line_split
|
pipe-sleep.rs
|
// xfail-fast
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::timer::sleep;
use std::uv;
use core::pipes;
use core::pipes::recv;
proto! oneshot (
waiting:send {
        signal -> !
}
)
pub fn
|
() {
use oneshot::client::*;
let c = pipes::spawn_service(oneshot::init, |p| { recv(p); });
let iotask = &uv::global_loop::get();
sleep(iotask, 500);
signal(c);
}
|
main
|
identifier_name
|
svggraphicselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::document::Document;
use crate::dom::svgelement::SVGElement;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::element_state::ElementState;
#[dom_struct]
pub struct SVGGraphicsElement {
svgelement: SVGElement,
}
impl SVGGraphicsElement {
pub fn new_inherited(
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement {
SVGGraphicsElement::new_inherited_with_state(
ElementState::empty(),
tag_name,
prefix,
document,
)
}
pub fn new_inherited_with_state(
state: ElementState,
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement
|
}
impl VirtualMethods for SVGGraphicsElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<SVGElement>() as &dyn VirtualMethods)
}
}
|
{
SVGGraphicsElement {
svgelement: SVGElement::new_inherited_with_state(state, tag_name, prefix, document),
}
}
|
identifier_body
|
svggraphicselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::document::Document;
use crate::dom::svgelement::SVGElement;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::element_state::ElementState;
|
}
impl SVGGraphicsElement {
pub fn new_inherited(
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement {
SVGGraphicsElement::new_inherited_with_state(
ElementState::empty(),
tag_name,
prefix,
document,
)
}
pub fn new_inherited_with_state(
state: ElementState,
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement {
SVGGraphicsElement {
svgelement: SVGElement::new_inherited_with_state(state, tag_name, prefix, document),
}
}
}
impl VirtualMethods for SVGGraphicsElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<SVGElement>() as &dyn VirtualMethods)
}
}
|
#[dom_struct]
pub struct SVGGraphicsElement {
svgelement: SVGElement,
|
random_line_split
|
svggraphicselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::inheritance::Castable;
use crate::dom::document::Document;
use crate::dom::svgelement::SVGElement;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::element_state::ElementState;
#[dom_struct]
pub struct
|
{
svgelement: SVGElement,
}
impl SVGGraphicsElement {
pub fn new_inherited(
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement {
SVGGraphicsElement::new_inherited_with_state(
ElementState::empty(),
tag_name,
prefix,
document,
)
}
pub fn new_inherited_with_state(
state: ElementState,
tag_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> SVGGraphicsElement {
SVGGraphicsElement {
svgelement: SVGElement::new_inherited_with_state(state, tag_name, prefix, document),
}
}
}
impl VirtualMethods for SVGGraphicsElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<SVGElement>() as &dyn VirtualMethods)
}
}
|
SVGGraphicsElement
|
identifier_name
|
cauchy.rs
|
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Cauchy distribution.
use num_traits::{Float, FloatConst};
use crate::{Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The Cauchy distribution `Cauchy(median, scale)`.
///
/// This distribution has a density function:
/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
///
/// Note that at least for `f32`, results are not fully portable due to minor
/// differences in the target system's *tan* implementation, `tanf`.
///
/// # Example
///
/// ```
/// use rand_distr::{Cauchy, Distribution};
///
/// let cau = Cauchy::new(2.0, 5.0).unwrap();
/// let v = cau.sample(&mut rand::thread_rng());
/// println!("{} is from a Cauchy(2, 5) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
median: F,
scale: F,
}
/// Error type returned from `Cauchy::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Cauchy distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
/// Construct a new `Cauchy` with the given shape parameters
/// `median` the peak location and `scale` the scale factor.
pub fn new(median: F, scale: F) -> Result<Cauchy<F>, Error> {
        if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
Ok(Cauchy { median, scale })
}
}
impl<F> Distribution<F> for Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// sample from [0, 1)
let x = Standard.sample(rng);
// get standard cauchy random number
// note that π/2 is not exactly representable, even if x=0.5 the result is finite
let comp_dev = (F::PI() * x).tan();
// shift and scale according to parameters
self.median + self.scale * comp_dev
}
}
#[cfg(test)]
mod test {
use super::*;
fn median(mut numbers: &mut [f64]) -> f64 {
sort(&mut numbers);
let mid = numbers.len() / 2;
numbers[mid]
}
fn sort(numbers: &mut [f64]) {
numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
}
#[test]
fn test_cauchy_averages() {
|
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_zero() {
Cauchy::new(0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_neg() {
Cauchy::new(0.0, -10.0).unwrap();
}
#[test]
fn value_stability() {
fn gen_samples<F: Float + FloatConst + core::fmt::Debug>(m: F, s: F, buf: &mut [F])
where Standard: Distribution<F> {
let distr = Cauchy::new(m, s).unwrap();
let mut rng = crate::test::rng(353);
for x in buf {
*x = rng.sample(&distr);
}
}
let mut buf = [0.0; 4];
gen_samples(100f64, 10.0, &mut buf);
assert_eq!(&buf, &[
77.93369152808678,
90.1606912098641,
125.31516221323625,
86.10217834773925
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
let mut buf = [0.0; 4];
gen_samples(10f32, 7.0, &mut buf);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for (a, b) in buf.iter().zip(expected.iter()) {
assert_almost_eq!(*a, *b, 1e-5);
}
}
}
|
// NOTE: given that the variance and mean are undefined,
// this test does not have any rigorous statistical meaning.
let cauchy = Cauchy::new(10.0, 5.0).unwrap();
let mut rng = crate::test::rng(123);
let mut numbers: [f64; 1000] = [0.0; 1000];
let mut sum = 0.0;
for number in &mut numbers[..] {
*number = cauchy.sample(&mut rng);
sum += *number;
}
let median = median(&mut numbers);
#[cfg(feature = "std")]
std::println!("Cauchy median: {}", median);
assert!((median - 10.0).abs() < 0.4); // not 100% certain, but probable enough
let mean = sum / 1000.0;
#[cfg(feature = "std")]
std::println!("Cauchy mean: {}", mean);
// for a Cauchy distribution the mean should not converge
assert!((mean - 10.0).abs() > 0.4); // not 100% certain, but probable enough
|
identifier_body
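The `sample` implementation above is inverse-transform sampling: for `u` drawn uniformly from [0, 1), `tan(pi * u)` is standard-Cauchy distributed, and the result is then shifted by `median` and stretched by `scale`. A self-contained sketch of just that transform, using only the standard library (the uniform values are hard-coded here purely for illustration):

use std::f64::consts::PI;

// Map a uniform u in [0, 1) to a Cauchy(median, scale) sample.
fn cauchy_from_uniform(u: f64, median: f64, scale: f64) -> f64 {
    median + scale * (PI * u).tan()
}

fn main() {
    // u = 0.0 returns exactly the median; values near 0.5 land far out in the
    // tails, since tan(pi * u) diverges there (it stays finite in floating
    // point because pi/2 is not exactly representable).
    for &u in &[0.0, 0.25, 0.75, 0.499] {
        println!("u = {:.3} -> {:.4}", u, cauchy_from_uniform(u, 2.0, 5.0));
    }
}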
|
cauchy.rs
|
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Cauchy distribution.
use num_traits::{Float, FloatConst};
use crate::{Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The Cauchy distribution `Cauchy(median, scale)`.
///
/// This distribution has a density function:
/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
///
/// Note that at least for `f32`, results are not fully portable due to minor
/// differences in the target system's *tan* implementation, `tanf`.
///
/// # Example
///
/// ```
/// use rand_distr::{Cauchy, Distribution};
///
/// let cau = Cauchy::new(2.0, 5.0).unwrap();
/// let v = cau.sample(&mut rand::thread_rng());
/// println!("{} is from a Cauchy(2, 5) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
median: F,
scale: F,
}
/// Error type returned from `Cauchy::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Cauchy distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
/// Construct a new `Cauchy` with the given shape parameters
/// `median` the peak location and `scale` the scale factor.
pub fn new(median: F, scale: F) -> Result<Cauchy<F>, Error> {
        if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
Ok(Cauchy { median, scale })
}
}
impl<F> Distribution<F> for Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// sample from [0, 1)
let x = Standard.sample(rng);
// get standard cauchy random number
// note that π/2 is not exactly representable, even if x=0.5 the result is finite
let comp_dev = (F::PI() * x).tan();
// shift and scale according to parameters
self.median + self.scale * comp_dev
}
}
#[cfg(test)]
mod test {
use super::*;
fn median(mut numbers: &mut [f64]) -> f64 {
sort(&mut numbers);
let mid = numbers.len() / 2;
numbers[mid]
}
fn sort(numbers: &mut [f64]) {
numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
}
#[test]
fn t
|
) {
// NOTE: given that the variance and mean are undefined,
// this test does not have any rigorous statistical meaning.
let cauchy = Cauchy::new(10.0, 5.0).unwrap();
let mut rng = crate::test::rng(123);
let mut numbers: [f64; 1000] = [0.0; 1000];
let mut sum = 0.0;
for number in &mut numbers[..] {
*number = cauchy.sample(&mut rng);
sum += *number;
}
let median = median(&mut numbers);
#[cfg(feature = "std")]
std::println!("Cauchy median: {}", median);
assert!((median - 10.0).abs() < 0.4); // not 100% certain, but probable enough
let mean = sum / 1000.0;
#[cfg(feature = "std")]
std::println!("Cauchy mean: {}", mean);
// for a Cauchy distribution the mean should not converge
assert!((mean - 10.0).abs() > 0.4); // not 100% certain, but probable enough
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_zero() {
Cauchy::new(0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_neg() {
Cauchy::new(0.0, -10.0).unwrap();
}
#[test]
fn value_stability() {
fn gen_samples<F: Float + FloatConst + core::fmt::Debug>(m: F, s: F, buf: &mut [F])
where Standard: Distribution<F> {
let distr = Cauchy::new(m, s).unwrap();
let mut rng = crate::test::rng(353);
for x in buf {
*x = rng.sample(&distr);
}
}
let mut buf = [0.0; 4];
gen_samples(100f64, 10.0, &mut buf);
assert_eq!(&buf, &[
77.93369152808678,
90.1606912098641,
125.31516221323625,
86.10217834773925
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
let mut buf = [0.0; 4];
gen_samples(10f32, 7.0, &mut buf);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for (a, b) in buf.iter().zip(expected.iter()) {
assert_almost_eq!(*a, *b, 1e-5);
}
}
}
|
est_cauchy_averages(
|
identifier_name
|
cauchy.rs
|
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Cauchy distribution.
use num_traits::{Float, FloatConst};
use crate::{Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The Cauchy distribution `Cauchy(median, scale)`.
///
/// This distribution has a density function:
/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
///
/// Note that at least for `f32`, results are not fully portable due to minor
/// differences in the target system's *tan* implementation, `tanf`.
///
/// # Example
///
/// ```
/// use rand_distr::{Cauchy, Distribution};
///
/// let cau = Cauchy::new(2.0, 5.0).unwrap();
/// let v = cau.sample(&mut rand::thread_rng());
/// println!("{} is from a Cauchy(2, 5) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
pub struct Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
median: F,
scale: F,
}
/// Error type returned from `Cauchy::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Cauchy distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
/// Construct a new `Cauchy` with the given shape parameters
/// `median` the peak location and `scale` the scale factor.
pub fn new(median: F, scale: F) -> Result<Cauchy<F>, Error> {
        if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
Ok(Cauchy { median, scale })
}
}
impl<F> Distribution<F> for Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// sample from [0, 1)
let x = Standard.sample(rng);
// get standard cauchy random number
// note that π/2 is not exactly representable, even if x=0.5 the result is finite
let comp_dev = (F::PI() * x).tan();
// shift and scale according to parameters
self.median + self.scale * comp_dev
}
}
#[cfg(test)]
mod test {
use super::*;
fn median(mut numbers: &mut [f64]) -> f64 {
sort(&mut numbers);
let mid = numbers.len() / 2;
numbers[mid]
}
fn sort(numbers: &mut [f64]) {
numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
}
#[test]
fn test_cauchy_averages() {
// NOTE: given that the variance and mean are undefined,
// this test does not have any rigorous statistical meaning.
let cauchy = Cauchy::new(10.0, 5.0).unwrap();
let mut rng = crate::test::rng(123);
let mut numbers: [f64; 1000] = [0.0; 1000];
let mut sum = 0.0;
for number in &mut numbers[..] {
*number = cauchy.sample(&mut rng);
sum += *number;
}
let median = median(&mut numbers);
#[cfg(feature = "std")]
std::println!("Cauchy median: {}", median);
assert!((median - 10.0).abs() < 0.4); // not 100% certain, but probable enough
let mean = sum / 1000.0;
#[cfg(feature = "std")]
std::println!("Cauchy mean: {}", mean);
// for a Cauchy distribution the mean should not converge
assert!((mean - 10.0).abs() > 0.4); // not 100% certain, but probable enough
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_zero() {
Cauchy::new(0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_neg() {
Cauchy::new(0.0, -10.0).unwrap();
}
#[test]
fn value_stability() {
fn gen_samples<F: Float + FloatConst + core::fmt::Debug>(m: F, s: F, buf: &mut [F])
where Standard: Distribution<F> {
let distr = Cauchy::new(m, s).unwrap();
let mut rng = crate::test::rng(353);
for x in buf {
*x = rng.sample(&distr);
}
}
let mut buf = [0.0; 4];
gen_samples(100f64, 10.0, &mut buf);
assert_eq!(&buf, &[
77.93369152808678,
90.1606912098641,
125.31516221323625,
86.10217834773925
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
let mut buf = [0.0; 4];
gen_samples(10f32, 7.0, &mut buf);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for (a, b) in buf.iter().zip(expected.iter()) {
assert_almost_eq!(*a, *b, 1e-5);
}
}
}
|
random_line_split
|
|
table.rs
|
use std::cmp::max;
use std::env;
use std::ops::Deref;
use std::sync::{Mutex, MutexGuard};
use datetime::TimeZone;
use zoneinfo_compiled::{CompiledData, Result as TZResult};
use lazy_static::lazy_static;
use log::*;
use users::UsersCache;
use crate::fs::{File, fields as f};
use crate::fs::feature::git::GitCache;
use crate::output::cell::TextCell;
use crate::output::render::TimeRender;
use crate::output::time::TimeFormat;
use crate::theme::Theme;
/// Options for displaying a table.
#[derive(PartialEq, Debug)]
pub struct Options {
pub size_format: SizeFormat,
pub time_format: TimeFormat,
pub user_format: UserFormat,
pub columns: Columns,
}
/// Extra columns to display in the table.
#[allow(clippy::struct_excessive_bools)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct Columns {
/// At least one of these timestamps will be shown.
pub time_types: TimeTypes,
// The rest are just on/off
pub inode: bool,
pub links: bool,
pub blocks: bool,
pub group: bool,
pub git: bool,
pub octal: bool,
// Defaults to true:
pub permissions: bool,
pub filesize: bool,
pub user: bool,
}
impl Columns {
pub fn collect(&self, actually_enable_git: bool) -> Vec<Column> {
let mut columns = Vec::with_capacity(4);
if self.inode {
columns.push(Column::Inode);
}
if self.octal {
columns.push(Column::Octal);
}
if self.permissions {
columns.push(Column::Permissions);
}
if self.links {
columns.push(Column::HardLinks);
}
if self.filesize {
columns.push(Column::FileSize);
}
if self.blocks {
columns.push(Column::Blocks);
}
if self.user {
columns.push(Column::User);
}
if self.group {
columns.push(Column::Group);
}
if self.time_types.modified {
columns.push(Column::Timestamp(TimeType::Modified));
}
if self.time_types.changed {
columns.push(Column::Timestamp(TimeType::Changed));
}
if self.time_types.created {
columns.push(Column::Timestamp(TimeType::Created));
}
if self.time_types.accessed {
columns.push(Column::Timestamp(TimeType::Accessed));
}
if self.git && actually_enable_git {
columns.push(Column::GitStatus);
}
columns
}
}
/// A table contains these.
#[derive(Debug, Copy, Clone)]
pub enum Column {
Permissions,
FileSize,
Timestamp(TimeType),
Blocks,
User,
Group,
HardLinks,
Inode,
GitStatus,
Octal,
}
/// Each column can pick its own **Alignment**. Usually, numbers are
/// right-aligned, and text is left-aligned.
#[derive(Copy, Clone)]
pub enum Alignment {
Left,
Right,
}
impl Column {
/// Get the alignment this column should use.
pub fn alignment(self) -> Alignment {
match self {
Self::FileSize |
Self::HardLinks |
Self::Inode |
Self::Blocks |
Self::GitStatus => Alignment::Right,
_ => Alignment::Left,
}
}
/// Get the text that should be printed at the top, when the user elects
/// to have a header row printed.
pub fn header(self) -> &'static str {
match self {
Self::Permissions => "Permissions",
Self::FileSize => "Size",
Self::Timestamp(t) => t.header(),
Self::Blocks => "Blocks",
Self::User => "User",
Self::Group => "Group",
Self::HardLinks => "Links",
Self::Inode => "inode",
Self::GitStatus => "Git",
Self::Octal => "Octal",
}
}
}
/// Formatting options for file sizes.
#[allow(clippy::pub_enum_variant_names)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SizeFormat {
/// Format the file size using **decimal** prefixes, such as “kilo”,
/// “mega”, or “giga”.
DecimalBytes,
/// Format the file size using **binary** prefixes, such as “kibi”,
/// “mebi”, or “gibi”.
BinaryBytes,
/// Do no formatting and just display the size as a number of bytes.
JustBytes,
}
/// Formatting options for user and group.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum UserFormat {
/// The
|
Numeric,
/// Show the name
Name,
}
impl Default for SizeFormat {
fn default() -> Self {
Self::DecimalBytes
}
}
/// The types of a file’s time fields. These three fields are standard
/// across most (all?) operating systems.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum TimeType {
/// The file’s modified time (`st_mtime`).
Modified,
/// The file’s changed time (`st_ctime`)
Changed,
/// The file’s accessed time (`st_atime`).
Accessed,
/// The file’s creation time (`btime` or `birthtime`).
Created,
}
impl TimeType {
/// Returns the text to use for a column’s heading in the columns output.
pub fn header(self) -> &'static str {
match self {
Self::Modified => "Date Modified",
Self::Changed => "Date Changed",
Self::Accessed => "Date Accessed",
Self::Created => "Date Created",
}
}
}
/// Fields for which of a file’s time fields should be displayed in the
/// columns output.
///
/// There should always be at least one of these — there’s no way to disable
/// the time columns entirely (yet).
#[derive(PartialEq, Debug, Copy, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct TimeTypes {
pub modified: bool,
pub changed: bool,
pub accessed: bool,
pub created: bool,
}
impl Default for TimeTypes {
/// By default, display just the ‘modified’ time. This is the most
/// common option, which is why it has this shorthand.
fn default() -> Self {
Self {
modified: true,
changed: false,
accessed: false,
created: false,
}
}
}
/// The **environment** struct contains any data that could change between
/// running instances of exa, depending on the user’s computer’s configuration.
///
/// Any environment field should be able to be mocked up for test runs.
pub struct Environment {
/// Localisation rules for formatting numbers.
numeric: locale::Numeric,
/// The computer’s current time zone. This gets used to determine how to
/// offset files’ timestamps.
tz: Option<TimeZone>,
/// Mapping cache of user IDs to usernames.
users: Mutex<UsersCache>,
}
impl Environment {
pub fn lock_users(&self) -> MutexGuard<'_, UsersCache> {
self.users.lock().unwrap()
}
fn load_all() -> Self {
let tz = match determine_time_zone() {
Ok(t) => {
Some(t)
}
Err(ref e) => {
println!("Unable to determine time zone: {}", e);
None
}
};
let numeric = locale::Numeric::load_user_locale()
.unwrap_or_else(|_| locale::Numeric::english());
let users = Mutex::new(UsersCache::new());
Self { numeric, tz, users }
}
}
fn determine_time_zone() -> TZResult<TimeZone> {
if let Ok(file) = env::var("TZ") {
TimeZone::from_file({
if file.starts_with('/') {
file
} else {
format!("/usr/share/zoneinfo/{}", {
if file.starts_with(':') {
file.replacen(":", "", 1)
} else {
file
}
})
}
})
} else {
TimeZone::from_file("/etc/localtime")
}
}
lazy_static! {
static ref ENVIRONMENT: Environment = Environment::load_all();
}
pub struct Table<'a> {
columns: Vec<Column>,
theme: &'a Theme,
env: &'a Environment,
widths: TableWidths,
time_format: TimeFormat,
size_format: SizeFormat,
user_format: UserFormat,
git: Option<&'a GitCache>,
}
#[derive(Clone)]
pub struct Row {
cells: Vec<TextCell>,
}
impl<'a, 'f> Table<'a> {
pub fn new(options: &'a Options, git: Option<&'a GitCache>, theme: &'a Theme) -> Table<'a> {
let columns = options.columns.collect(git.is_some());
let widths = TableWidths::zero(columns.len());
let env = &*ENVIRONMENT;
Table {
theme,
widths,
columns,
git,
env,
time_format: options.time_format,
size_format: options.size_format,
user_format: options.user_format,
}
}
pub fn widths(&self) -> &TableWidths {
&self.widths
}
pub fn header_row(&self) -> Row {
let cells = self.columns.iter()
.map(|c| TextCell::paint_str(self.theme.ui.header, c.header()))
.collect();
Row { cells }
}
pub fn row_for_file(&self, file: &File<'_>, xattrs: bool) -> Row {
let cells = self.columns.iter()
.map(|c| self.display(file, *c, xattrs))
.collect();
Row { cells }
}
pub fn add_widths(&mut self, row: &Row) {
self.widths.add_widths(row)
}
fn permissions_plus(&self, file: &File<'_>, xattrs: bool) -> f::PermissionsPlus {
f::PermissionsPlus {
file_type: file.type_char(),
permissions: file.permissions(),
xattrs,
}
}
fn octal_permissions(&self, file: &File<'_>) -> f::OctalPermissions {
f::OctalPermissions {
permissions: file.permissions(),
}
}
fn display(&self, file: &File<'_>, column: Column, xattrs: bool) -> TextCell {
match column {
Column::Permissions => {
self.permissions_plus(file, xattrs).render(self.theme)
}
Column::FileSize => {
file.size().render(self.theme, self.size_format, &self.env.numeric)
}
Column::HardLinks => {
file.links().render(self.theme, &self.env.numeric)
}
Column::Inode => {
file.inode().render(self.theme.ui.inode)
}
Column::Blocks => {
file.blocks().render(self.theme)
}
Column::User => {
file.user().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::Group => {
file.group().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::GitStatus => {
self.git_status(file).render(self.theme)
}
Column::Octal => {
self.octal_permissions(file).render(self.theme.ui.octal)
}
Column::Timestamp(TimeType::Modified) => {
file.modified_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Changed) => {
file.changed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Created) => {
file.created_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Accessed) => {
file.accessed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
}
}
fn git_status(&self, file: &File<'_>) -> f::Git {
debug!("Getting Git status for file {:?}", file.path);
self.git
.map(|g| g.get(&file.path, file.is_directory()))
.unwrap_or_default()
}
pub fn render(&self, row: Row) -> TextCell {
let mut cell = TextCell::default();
let iter = row.cells.into_iter()
.zip(self.widths.iter())
.enumerate();
for (n, (this_cell, width)) in iter {
let padding = width - *this_cell.width;
match self.columns[n].alignment() {
Alignment::Left => {
cell.append(this_cell);
cell.add_spaces(padding);
}
Alignment::Right => {
cell.add_spaces(padding);
cell.append(this_cell);
}
}
cell.add_spaces(1);
}
cell
}
}
pub struct TableWidths(Vec<usize>);
impl Deref for TableWidths {
type Target = [usize];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TableWidths {
pub fn zero(count: usize) -> Self {
Self(vec![0; count])
}
pub fn add_widths(&mut self, row: &Row) {
for (old_width, cell) in self.0.iter_mut().zip(row.cells.iter()) {
*old_width = max(*old_width, *cell.width);
}
}
pub fn total(&self) -> usize {
self.0.len() + self.0.iter().sum::<usize>()
}
}
|
UID / GID
|
identifier_name
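The `determine_time_zone` function above resolves the `TZ` variable to a zoneinfo path before falling back to `/etc/localtime`: an absolute value is used as-is, otherwise it names a zone under `/usr/share/zoneinfo`, with a leading `:` (the POSIX form) stripped first. A standalone sketch of that path resolution, without the actual zoneinfo parsing (the helper name here is made up for illustration):

fn tz_file_path(tz: Option<&str>) -> String {
    match tz {
        // No TZ set: fall back to the system default link.
        None => "/etc/localtime".to_string(),
        // Absolute path: use it directly.
        Some(file) if file.starts_with('/') => file.to_string(),
        // Otherwise it names a zone; a leading ':' is dropped first.
        Some(file) => {
            let name = file.strip_prefix(':').unwrap_or(file);
            format!("/usr/share/zoneinfo/{}", name)
        }
    }
}

fn main() {
    assert_eq!(tz_file_path(None), "/etc/localtime");
    assert_eq!(tz_file_path(Some("/etc/localtime")), "/etc/localtime");
    assert_eq!(tz_file_path(Some(":Europe/Berlin")), "/usr/share/zoneinfo/Europe/Berlin");
    assert_eq!(tz_file_path(Some("US/Eastern")), "/usr/share/zoneinfo/US/Eastern");
    println!("ok");
}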
|
table.rs
|
use std::cmp::max;
use std::env;
use std::ops::Deref;
use std::sync::{Mutex, MutexGuard};
use datetime::TimeZone;
use zoneinfo_compiled::{CompiledData, Result as TZResult};
use lazy_static::lazy_static;
use log::*;
use users::UsersCache;
use crate::fs::{File, fields as f};
use crate::fs::feature::git::GitCache;
use crate::output::cell::TextCell;
use crate::output::render::TimeRender;
use crate::output::time::TimeFormat;
use crate::theme::Theme;
/// Options for displaying a table.
#[derive(PartialEq, Debug)]
pub struct Options {
pub size_format: SizeFormat,
pub time_format: TimeFormat,
pub user_format: UserFormat,
pub columns: Columns,
}
/// Extra columns to display in the table.
#[allow(clippy::struct_excessive_bools)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct Columns {
/// At least one of these timestamps will be shown.
pub time_types: TimeTypes,
// The rest are just on/off
pub inode: bool,
pub links: bool,
pub blocks: bool,
pub group: bool,
pub git: bool,
pub octal: bool,
// Defaults to true:
pub permissions: bool,
pub filesize: bool,
pub user: bool,
}
impl Columns {
pub fn collect(&self, actually_enable_git: bool) -> Vec<Column>
|
columns.push(Column::FileSize);
}
if self.blocks {
columns.push(Column::Blocks);
}
if self.user {
columns.push(Column::User);
}
if self.group {
columns.push(Column::Group);
}
if self.time_types.modified {
columns.push(Column::Timestamp(TimeType::Modified));
}
if self.time_types.changed {
columns.push(Column::Timestamp(TimeType::Changed));
}
if self.time_types.created {
columns.push(Column::Timestamp(TimeType::Created));
}
if self.time_types.accessed {
columns.push(Column::Timestamp(TimeType::Accessed));
}
if self.git && actually_enable_git {
columns.push(Column::GitStatus);
}
columns
}
}
/// A table contains these.
#[derive(Debug, Copy, Clone)]
pub enum Column {
Permissions,
FileSize,
Timestamp(TimeType),
Blocks,
User,
Group,
HardLinks,
Inode,
GitStatus,
Octal,
}
/// Each column can pick its own **Alignment**. Usually, numbers are
/// right-aligned, and text is left-aligned.
#[derive(Copy, Clone)]
pub enum Alignment {
Left,
Right,
}
impl Column {
/// Get the alignment this column should use.
pub fn alignment(self) -> Alignment {
match self {
Self::FileSize |
Self::HardLinks |
Self::Inode |
Self::Blocks |
Self::GitStatus => Alignment::Right,
_ => Alignment::Left,
}
}
/// Get the text that should be printed at the top, when the user elects
/// to have a header row printed.
pub fn header(self) -> &'static str {
match self {
Self::Permissions => "Permissions",
Self::FileSize => "Size",
Self::Timestamp(t) => t.header(),
Self::Blocks => "Blocks",
Self::User => "User",
Self::Group => "Group",
Self::HardLinks => "Links",
Self::Inode => "inode",
Self::GitStatus => "Git",
Self::Octal => "Octal",
}
}
}
/// Formatting options for file sizes.
#[allow(clippy::pub_enum_variant_names)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SizeFormat {
/// Format the file size using **decimal** prefixes, such as “kilo”,
/// “mega”, or “giga”.
DecimalBytes,
/// Format the file size using **binary** prefixes, such as “kibi”,
/// “mebi”, or “gibi”.
BinaryBytes,
/// Do no formatting and just display the size as a number of bytes.
JustBytes,
}
/// Formatting options for user and group.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum UserFormat {
/// The UID / GID
Numeric,
/// Show the name
Name,
}
impl Default for SizeFormat {
fn default() -> Self {
Self::DecimalBytes
}
}
/// The types of a file’s time fields. These three fields are standard
/// across most (all?) operating systems.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum TimeType {
/// The file’s modified time (`st_mtime`).
Modified,
/// The file’s changed time (`st_ctime`)
Changed,
/// The file’s accessed time (`st_atime`).
Accessed,
/// The file’s creation time (`btime` or `birthtime`).
Created,
}
impl TimeType {
/// Returns the text to use for a column’s heading in the columns output.
pub fn header(self) -> &'static str {
match self {
Self::Modified => "Date Modified",
Self::Changed => "Date Changed",
Self::Accessed => "Date Accessed",
Self::Created => "Date Created",
}
}
}
/// Fields for which of a file’s time fields should be displayed in the
/// columns output.
///
/// There should always be at least one of these — there’s no way to disable
/// the time columns entirely (yet).
#[derive(PartialEq, Debug, Copy, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct TimeTypes {
pub modified: bool,
pub changed: bool,
pub accessed: bool,
pub created: bool,
}
impl Default for TimeTypes {
/// By default, display just the ‘modified’ time. This is the most
/// common option, which is why it has this shorthand.
fn default() -> Self {
Self {
modified: true,
changed: false,
accessed: false,
created: false,
}
}
}
/// The **environment** struct contains any data that could change between
/// running instances of exa, depending on the user’s computer’s configuration.
///
/// Any environment field should be able to be mocked up for test runs.
pub struct Environment {
/// Localisation rules for formatting numbers.
numeric: locale::Numeric,
/// The computer’s current time zone. This gets used to determine how to
/// offset files’ timestamps.
tz: Option<TimeZone>,
/// Mapping cache of user IDs to usernames.
users: Mutex<UsersCache>,
}
impl Environment {
pub fn lock_users(&self) -> MutexGuard<'_, UsersCache> {
self.users.lock().unwrap()
}
fn load_all() -> Self {
let tz = match determine_time_zone() {
Ok(t) => {
Some(t)
}
Err(ref e) => {
println!("Unable to determine time zone: {}", e);
None
}
};
let numeric = locale::Numeric::load_user_locale()
.unwrap_or_else(|_| locale::Numeric::english());
let users = Mutex::new(UsersCache::new());
Self { numeric, tz, users }
}
}
fn determine_time_zone() -> TZResult<TimeZone> {
if let Ok(file) = env::var("TZ") {
TimeZone::from_file({
if file.starts_with('/') {
file
} else {
format!("/usr/share/zoneinfo/{}", {
if file.starts_with(':') {
file.replacen(":", "", 1)
} else {
file
}
})
}
})
} else {
TimeZone::from_file("/etc/localtime")
}
}
lazy_static! {
static ref ENVIRONMENT: Environment = Environment::load_all();
}
pub struct Table<'a> {
columns: Vec<Column>,
theme: &'a Theme,
env: &'a Environment,
widths: TableWidths,
time_format: TimeFormat,
size_format: SizeFormat,
user_format: UserFormat,
git: Option<&'a GitCache>,
}
#[derive(Clone)]
pub struct Row {
cells: Vec<TextCell>,
}
impl<'a, 'f> Table<'a> {
pub fn new(options: &'a Options, git: Option<&'a GitCache>, theme: &'a Theme) -> Table<'a> {
let columns = options.columns.collect(git.is_some());
let widths = TableWidths::zero(columns.len());
let env = &*ENVIRONMENT;
Table {
theme,
widths,
columns,
git,
env,
time_format: options.time_format,
size_format: options.size_format,
user_format: options.user_format,
}
}
pub fn widths(&self) -> &TableWidths {
&self.widths
}
pub fn header_row(&self) -> Row {
let cells = self.columns.iter()
.map(|c| TextCell::paint_str(self.theme.ui.header, c.header()))
.collect();
Row { cells }
}
pub fn row_for_file(&self, file: &File<'_>, xattrs: bool) -> Row {
let cells = self.columns.iter()
.map(|c| self.display(file, *c, xattrs))
.collect();
Row { cells }
}
pub fn add_widths(&mut self, row: &Row) {
self.widths.add_widths(row)
}
fn permissions_plus(&self, file: &File<'_>, xattrs: bool) -> f::PermissionsPlus {
f::PermissionsPlus {
file_type: file.type_char(),
permissions: file.permissions(),
xattrs,
}
}
fn octal_permissions(&self, file: &File<'_>) -> f::OctalPermissions {
f::OctalPermissions {
permissions: file.permissions(),
}
}
fn display(&self, file: &File<'_>, column: Column, xattrs: bool) -> TextCell {
match column {
Column::Permissions => {
self.permissions_plus(file, xattrs).render(self.theme)
}
Column::FileSize => {
file.size().render(self.theme, self.size_format, &self.env.numeric)
}
Column::HardLinks => {
file.links().render(self.theme, &self.env.numeric)
}
Column::Inode => {
file.inode().render(self.theme.ui.inode)
}
Column::Blocks => {
file.blocks().render(self.theme)
}
Column::User => {
file.user().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::Group => {
file.group().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::GitStatus => {
self.git_status(file).render(self.theme)
}
Column::Octal => {
self.octal_permissions(file).render(self.theme.ui.octal)
}
Column::Timestamp(TimeType::Modified) => {
file.modified_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Changed) => {
file.changed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Created) => {
file.created_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Accessed) => {
file.accessed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
}
}
fn git_status(&self, file: &File<'_>) -> f::Git {
debug!("Getting Git status for file {:?}", file.path);
self.git
.map(|g| g.get(&file.path, file.is_directory()))
.unwrap_or_default()
}
pub fn render(&self, row: Row) -> TextCell {
let mut cell = TextCell::default();
let iter = row.cells.into_iter()
.zip(self.widths.iter())
.enumerate();
for (n, (this_cell, width)) in iter {
let padding = width - *this_cell.width;
match self.columns[n].alignment() {
Alignment::Left => {
cell.append(this_cell);
cell.add_spaces(padding);
}
Alignment::Right => {
cell.add_spaces(padding);
cell.append(this_cell);
}
}
cell.add_spaces(1);
}
cell
}
}
pub struct TableWidths(Vec<usize>);
impl Deref for TableWidths {
type Target = [usize];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TableWidths {
pub fn zero(count: usize) -> Self {
Self(vec![0; count])
}
pub fn add_widths(&mut self, row: &Row) {
for (old_width, cell) in self.0.iter_mut().zip(row.cells.iter()) {
*old_width = max(*old_width, *cell.width);
}
}
pub fn total(&self) -> usize {
self.0.len() + self.0.iter().sum::<usize>()
}
}
|
{
let mut columns = Vec::with_capacity(4);
if self.inode {
columns.push(Column::Inode);
}
if self.octal {
columns.push(Column::Octal);
}
if self.permissions {
columns.push(Column::Permissions);
}
if self.links {
columns.push(Column::HardLinks);
}
if self.filesize {
|
identifier_body
|
table.rs
|
use std::cmp::max;
use std::env;
use std::ops::Deref;
use std::sync::{Mutex, MutexGuard};
use datetime::TimeZone;
use zoneinfo_compiled::{CompiledData, Result as TZResult};
use lazy_static::lazy_static;
use log::*;
use users::UsersCache;
use crate::fs::{File, fields as f};
use crate::fs::feature::git::GitCache;
use crate::output::cell::TextCell;
use crate::output::render::TimeRender;
use crate::output::time::TimeFormat;
use crate::theme::Theme;
/// Options for displaying a table.
#[derive(PartialEq, Debug)]
pub struct Options {
pub size_format: SizeFormat,
pub time_format: TimeFormat,
pub user_format: UserFormat,
pub columns: Columns,
}
/// Extra columns to display in the table.
#[allow(clippy::struct_excessive_bools)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub struct Columns {
/// At least one of these timestamps will be shown.
pub time_types: TimeTypes,
// The rest are just on/off
pub inode: bool,
pub links: bool,
pub blocks: bool,
pub group: bool,
pub git: bool,
pub octal: bool,
// Defaults to true:
pub permissions: bool,
pub filesize: bool,
pub user: bool,
}
impl Columns {
pub fn collect(&self, actually_enable_git: bool) -> Vec<Column> {
let mut columns = Vec::with_capacity(4);
if self.inode {
columns.push(Column::Inode);
}
if self.octal {
columns.push(Column::Octal);
}
if self.permissions {
columns.push(Column::Permissions);
}
if self.links {
columns.push(Column::HardLinks);
}
if self.filesize {
columns.push(Column::FileSize);
}
if self.blocks {
columns.push(Column::Blocks);
}
if self.user {
columns.push(Column::User);
}
if self.group {
|
if self.time_types.modified {
columns.push(Column::Timestamp(TimeType::Modified));
}
if self.time_types.changed {
columns.push(Column::Timestamp(TimeType::Changed));
}
if self.time_types.created {
columns.push(Column::Timestamp(TimeType::Created));
}
if self.time_types.accessed {
columns.push(Column::Timestamp(TimeType::Accessed));
}
if self.git && actually_enable_git {
columns.push(Column::GitStatus);
}
columns
}
}
/// A table contains these.
#[derive(Debug, Copy, Clone)]
pub enum Column {
Permissions,
FileSize,
Timestamp(TimeType),
Blocks,
User,
Group,
HardLinks,
Inode,
GitStatus,
Octal,
}
/// Each column can pick its own **Alignment**. Usually, numbers are
/// right-aligned, and text is left-aligned.
#[derive(Copy, Clone)]
pub enum Alignment {
Left,
Right,
}
impl Column {
/// Get the alignment this column should use.
pub fn alignment(self) -> Alignment {
match self {
Self::FileSize |
Self::HardLinks |
Self::Inode |
Self::Blocks |
Self::GitStatus => Alignment::Right,
_ => Alignment::Left,
}
}
/// Get the text that should be printed at the top, when the user elects
/// to have a header row printed.
pub fn header(self) -> &'static str {
match self {
Self::Permissions => "Permissions",
Self::FileSize => "Size",
Self::Timestamp(t) => t.header(),
Self::Blocks => "Blocks",
Self::User => "User",
Self::Group => "Group",
Self::HardLinks => "Links",
Self::Inode => "inode",
Self::GitStatus => "Git",
Self::Octal => "Octal",
}
}
}
/// Formatting options for file sizes.
#[allow(clippy::pub_enum_variant_names)]
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum SizeFormat {
/// Format the file size using **decimal** prefixes, such as “kilo”,
/// “mega”, or “giga”.
DecimalBytes,
/// Format the file size using **binary** prefixes, such as “kibi”,
/// “mebi”, or “gibi”.
BinaryBytes,
/// Do no formatting and just display the size as a number of bytes.
JustBytes,
}
/// Formatting options for user and group.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum UserFormat {
/// The UID / GID
Numeric,
/// Show the name
Name,
}
impl Default for SizeFormat {
fn default() -> Self {
Self::DecimalBytes
}
}
/// The types of a file’s time fields. These three fields are standard
/// across most (all?) operating systems.
#[derive(PartialEq, Debug, Copy, Clone)]
pub enum TimeType {
/// The file’s modified time (`st_mtime`).
Modified,
/// The file’s changed time (`st_ctime`)
Changed,
/// The file’s accessed time (`st_atime`).
Accessed,
/// The file’s creation time (`btime` or `birthtime`).
Created,
}
impl TimeType {
/// Returns the text to use for a column’s heading in the columns output.
pub fn header(self) -> &'static str {
match self {
Self::Modified => "Date Modified",
Self::Changed => "Date Changed",
Self::Accessed => "Date Accessed",
Self::Created => "Date Created",
}
}
}
/// Fields for which of a file’s time fields should be displayed in the
/// columns output.
///
/// There should always be at least one of these — there’s no way to disable
/// the time columns entirely (yet).
#[derive(PartialEq, Debug, Copy, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct TimeTypes {
pub modified: bool,
pub changed: bool,
pub accessed: bool,
pub created: bool,
}
impl Default for TimeTypes {
/// By default, display just the ‘modified’ time. This is the most
/// common option, which is why it has this shorthand.
fn default() -> Self {
Self {
modified: true,
changed: false,
accessed: false,
created: false,
}
}
}
/// The **environment** struct contains any data that could change between
/// running instances of exa, depending on the user’s computer’s configuration.
///
/// Any environment field should be able to be mocked up for test runs.
pub struct Environment {
/// Localisation rules for formatting numbers.
numeric: locale::Numeric,
/// The computer’s current time zone. This gets used to determine how to
/// offset files’ timestamps.
tz: Option<TimeZone>,
/// Mapping cache of user IDs to usernames.
users: Mutex<UsersCache>,
}
impl Environment {
pub fn lock_users(&self) -> MutexGuard<'_, UsersCache> {
self.users.lock().unwrap()
}
fn load_all() -> Self {
let tz = match determine_time_zone() {
Ok(t) => {
Some(t)
}
Err(ref e) => {
println!("Unable to determine time zone: {}", e);
None
}
};
let numeric = locale::Numeric::load_user_locale()
.unwrap_or_else(|_| locale::Numeric::english());
let users = Mutex::new(UsersCache::new());
Self { numeric, tz, users }
}
}
fn determine_time_zone() -> TZResult<TimeZone> {
if let Ok(file) = env::var("TZ") {
TimeZone::from_file({
if file.starts_with('/') {
file
} else {
format!("/usr/share/zoneinfo/{}", {
if file.starts_with(':') {
file.replacen(":", "", 1)
} else {
file
}
})
}
})
} else {
TimeZone::from_file("/etc/localtime")
}
}
lazy_static! {
static ref ENVIRONMENT: Environment = Environment::load_all();
}
pub struct Table<'a> {
columns: Vec<Column>,
theme: &'a Theme,
env: &'a Environment,
widths: TableWidths,
time_format: TimeFormat,
size_format: SizeFormat,
user_format: UserFormat,
git: Option<&'a GitCache>,
}
#[derive(Clone)]
pub struct Row {
cells: Vec<TextCell>,
}
impl<'a, 'f> Table<'a> {
pub fn new(options: &'a Options, git: Option<&'a GitCache>, theme: &'a Theme) -> Table<'a> {
let columns = options.columns.collect(git.is_some());
let widths = TableWidths::zero(columns.len());
let env = &*ENVIRONMENT;
Table {
theme,
widths,
columns,
git,
env,
time_format: options.time_format,
size_format: options.size_format,
user_format: options.user_format,
}
}
pub fn widths(&self) -> &TableWidths {
&self.widths
}
pub fn header_row(&self) -> Row {
let cells = self.columns.iter()
.map(|c| TextCell::paint_str(self.theme.ui.header, c.header()))
.collect();
Row { cells }
}
pub fn row_for_file(&self, file: &File<'_>, xattrs: bool) -> Row {
let cells = self.columns.iter()
.map(|c| self.display(file, *c, xattrs))
.collect();
Row { cells }
}
pub fn add_widths(&mut self, row: &Row) {
self.widths.add_widths(row)
}
fn permissions_plus(&self, file: &File<'_>, xattrs: bool) -> f::PermissionsPlus {
f::PermissionsPlus {
file_type: file.type_char(),
permissions: file.permissions(),
xattrs,
}
}
fn octal_permissions(&self, file: &File<'_>) -> f::OctalPermissions {
f::OctalPermissions {
permissions: file.permissions(),
}
}
fn display(&self, file: &File<'_>, column: Column, xattrs: bool) -> TextCell {
match column {
Column::Permissions => {
self.permissions_plus(file, xattrs).render(self.theme)
}
Column::FileSize => {
file.size().render(self.theme, self.size_format, &self.env.numeric)
}
Column::HardLinks => {
file.links().render(self.theme, &self.env.numeric)
}
Column::Inode => {
file.inode().render(self.theme.ui.inode)
}
Column::Blocks => {
file.blocks().render(self.theme)
}
Column::User => {
file.user().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::Group => {
file.group().render(self.theme, &*self.env.lock_users(), self.user_format)
}
Column::GitStatus => {
self.git_status(file).render(self.theme)
}
Column::Octal => {
self.octal_permissions(file).render(self.theme.ui.octal)
}
Column::Timestamp(TimeType::Modified) => {
file.modified_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Changed) => {
file.changed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Created) => {
file.created_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
Column::Timestamp(TimeType::Accessed) => {
file.accessed_time().render(self.theme.ui.date, &self.env.tz, self.time_format)
}
}
}
fn git_status(&self, file: &File<'_>) -> f::Git {
debug!("Getting Git status for file {:?}", file.path);
self.git
.map(|g| g.get(&file.path, file.is_directory()))
.unwrap_or_default()
}
pub fn render(&self, row: Row) -> TextCell {
let mut cell = TextCell::default();
let iter = row.cells.into_iter()
.zip(self.widths.iter())
.enumerate();
for (n, (this_cell, width)) in iter {
let padding = width - *this_cell.width;
match self.columns[n].alignment() {
Alignment::Left => {
cell.append(this_cell);
cell.add_spaces(padding);
}
Alignment::Right => {
cell.add_spaces(padding);
cell.append(this_cell);
}
}
cell.add_spaces(1);
}
cell
}
}
pub struct TableWidths(Vec<usize>);
impl Deref for TableWidths {
type Target = [usize];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TableWidths {
pub fn zero(count: usize) -> Self {
Self(vec![0; count])
}
pub fn add_widths(&mut self, row: &Row) {
for (old_width, cell) in self.0.iter_mut().zip(row.cells.iter()) {
*old_width = max(*old_width, *cell.width);
}
}
pub fn total(&self) -> usize {
self.0.len() + self.0.iter().sum::<usize>()
}
}
|
columns.push(Column::Group);
}
|
random_line_split
|
auth_macros.rs
|
macro_rules! auth_module (($auth_name:ident,
$verify_name:ident,
$keybytes:expr,
$tagbytes:expr) => (
use libc::{c_ulonglong};
use randombytes::randombytes_into;
use rustc_serialize;
pub const KEYBYTES: usize = $keybytes;
pub const TAGBYTES: usize = $tagbytes;
/// Authentication `Key`
///
/// When a `Key` goes out of scope its contents
/// will be zeroed out
pub struct Key(pub [u8; KEYBYTES]);
newtype_drop!(Key);
newtype_clone!(Key);
newtype_impl!(Key, KEYBYTES);
/// Authentication `Tag`
///
/// The tag implements the traits `PartialEq` and `Eq` using constant-time
/// comparison functions. See `sodiumoxide::crypto::verify::safe_memcmp`
#[derive(Copy)]
pub struct Tag(pub [u8; TAGBYTES]);
newtype_clone!(Tag);
newtype_impl!(Tag, TAGBYTES);
non_secret_newtype_impl!(Tag);
/// `gen_key()` randomly generates a key for authentication
///
/// THREAD SAFETY: `gen_key()` is thread-safe provided that you have
/// called `sodiumoxide::init()` once before using any other function
/// from sodiumoxide.
pub fn gen_key() -> Key {
let mut k = [0; KEYBYTES];
randombytes_into(&mut k);
Key(k)
}
/// `authenticate()` authenticates a message `m` using a secret key `k`.
/// The function returns an authenticator tag.
pub fn authenticate(m: &[u8],
&Key(ref k): &Key) -> Tag {
unsafe {
let mut tag = [0; TAGBYTES];
$auth_name(&mut tag,
m.as_ptr(),
m.len() as c_ulonglong,
k);
Tag(tag)
}
}
/// `verify()` returns `true` if `tag` is a correct authenticator of message `m`
/// under a secret key `k`. Otherwise it returns false.
pub fn verify(&Tag(ref tag): &Tag, m: &[u8],
&Key(ref k): &Key) -> bool {
unsafe {
$verify_name(tag,
m.as_ptr(),
m.len() as c_ulonglong,
k) == 0
}
}
#[cfg(test)]
mod test_m {
use super::*;
use test_utils::round_trip;
#[test]
fn test_auth_verify() {
use randombytes::randombytes;
for i in (0..256usize) {
let k = gen_key();
let m = randombytes(i);
let tag = authenticate(&m, &k);
assert!(verify(&tag, &m, &k));
}
}
#[test]
fn test_auth_verify_tamper() {
use randombytes::randombytes;
for i in (0..32usize) {
let k = gen_key();
let mut m = randombytes(i);
let Tag(mut tagbuf) = authenticate(&mut m, &k);
for j in (0..m.len()) {
m[j] ^= 0x20;
assert!(!verify(&Tag(tagbuf), &mut m, &k));
m[j] ^= 0x20;
}
for j in (0..tagbuf.len()) {
tagbuf[j] ^= 0x20;
assert!(!verify(&Tag(tagbuf), &mut m, &k));
tagbuf[j] ^= 0x20;
}
}
}
#[test]
fn test_serialisation() {
use randombytes::randombytes;
for i in (0..256usize) {
let k = gen_key();
let m = randombytes(i);
let tag = authenticate(&m, &k);
round_trip(k);
round_trip(tag);
}
}
}
#[cfg(feature = "benchmarks")]
#[cfg(test)]
mod bench_m {
extern crate test;
use randombytes::randombytes;
use super::*;
const BENCH_SIZES: [usize; 14] = [0, 1, 2, 4, 8, 16, 32, 64,
128, 256, 512, 1024, 2048, 4096];
#[bench]
fn bench_auth(b: &mut test::Bencher) {
let k = gen_key();
let ms: Vec<Vec<u8>> = BENCH_SIZES.iter().map(|s| {
randombytes(*s)
}).collect();
b.iter(|| {
for m in ms.iter() {
authenticate(&m, &k);
}
});
}
#[bench]
fn bench_verify(b: &mut test::Bencher) {
let k = gen_key();
let ms: Vec<Vec<u8>> = BENCH_SIZES.iter().map(|s| {
randombytes(*s)
}).collect();
let tags: Vec<Tag> = ms.iter().map(|m| {
authenticate(&m, &k)
}).collect();
b.iter(|| {
for (m, t) in ms.iter().zip(tags.iter()) {
verify(t, &m, &k);
}
});
}
}
));
/// Macro for defining streaming authenticator tag computation types and functions.
///
/// Parameters:
/// $state_name - The authenticator state type.
/// SAFETY NOTE: This needs to be a type that does not define a `Drop`
/// implementation, otherwise undefined behaviour will occur.
/// $init_name - A function `f(s: *mut $state_name, k: *u8, klen: size_t)` that initializes
/// a state with a key.
/// $update_name - A function `f(s: *mut $state_name, m: *u8, mlen: size_t)` that updates
/// a state with a message chunk.
/// $final_name - A function `f(s: *mut $state_name, t: *u8)` that computes an authenticator tag of length $tagbytes from a $state_name.
/// $tagbytes - The number of bytes in an authenticator tag.
macro_rules! auth_state (($state_name:ident,
$init_name:ident,
$update_name:ident,
$final_name:ident,
$tagbytes:expr) => (
use libc::size_t;
use std::mem;
use ffi;
/// Authentication `State`
///
/// State for multi-part (streaming) authenticator tag computation.
///
/// When a `State` goes out of scope its contents will be zeroed out.
///
/// NOTE: the streaming interface takes variable length keys, as opposed to the
/// simple interface which takes a fixed length key. The streaming interface also does not
/// define its own `Key` type, instead using slices for its `init()` method.
/// The caller of the functions is responsible for zeroing out the key after it's been used
/// (in contrast to the simple interface which defines a `Drop` implementation for `Key`).
pub struct State($state_name);
impl Drop for State {
fn drop(&mut self) {
let &mut State(ref mut s) = self;
unsafe {
let sp: *mut $state_name = s;
ffi::sodium_memzero(sp as *mut u8, mem::size_of_val(s) as c_ulonglong);
}
}
}
impl State {
/// `init()` initializes an authentication structure using a secret key 'k'.
pub fn init(k: &[u8]) -> State {
unsafe {
let mut s = mem::uninitialized();
$init_name(&mut s, k.as_ptr(), k.len() as size_t);
State(s)
}
}
/// `update()` can be called more than once in order to compute the authenticator
/// from sequential chunks of the message.
pub fn update(&mut self, in_: &[u8]) {
let &mut State(ref mut state) = self;
unsafe {
$update_name(state, in_.as_ptr(), in_.len() as size_t);
}
}
/// `finalize()` finalizes the authenticator computation and returns a `Tag`.
pub fn finalize(&mut self) -> Tag {
unsafe {
let &mut State(ref mut state) = self;
|
$final_name(state, &mut tag);
Tag(tag)
}
}
}
#[cfg(test)]
mod test_s {
use super::*;
#[test]
fn test_auth_eq_auth_state() {
use randombytes::randombytes;
for i in (0..256usize) {
let k = gen_key();
let m = randombytes(i);
let tag = authenticate(&m, &k);
let mut state = State::init(&k[..]);
state.update(&m);
let tag2 = state.finalize();
assert_eq!(tag, tag2);
}
}
#[test]
fn test_auth_eq_auth_state_chunked() {
use randombytes::randombytes;
for i in (0..256usize) {
let k = gen_key();
let m = randombytes(i);
let tag = authenticate(&m, &k);
let mut state = State::init(&k[..]);
for c in m.chunks(1) {
state.update(c);
}
let tag2 = state.finalize();
assert_eq!(tag, tag2);
}
}
}
));
|
let mut tag = [0; $tagbytes as usize];
|
random_line_split
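A minimal usage sketch of the gen_key()/authenticate()/verify() API that auth_module! expands to. It assumes one concrete instantiation, sodiumoxide's crypto::auth module, plus an explicit sodiumoxide::init() call; both are assumptions about the surrounding crate, not something the macro itself defines.
// Hedged sketch: uses sodiumoxide's crypto::auth as an assumed instantiation
// of auth_module!; the gen_key()/authenticate()/verify() calls mirror the
// functions generated by the macro above.
extern crate sodiumoxide;
use sodiumoxide::crypto::auth;

fn main() {
    // init() is assumed to be required before generating random keys.
    let _ = sodiumoxide::init();
    let key = auth::gen_key();                 // random KEYBYTES-long key
    let msg = b"attack at dawn";
    let tag = auth::authenticate(msg, &key);   // TAGBYTES-long authenticator
    assert!(auth::verify(&tag, msg, &key));
    // A different message must not verify under the same tag and key.
    assert!(!auth::verify(&tag, b"attack at dusk", &key));
}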
|
parity.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
|
use std::path::PathBuf;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
fn parity_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".parity");
home
}
fn parity_keystore(t: DirectoryType) -> PathBuf {
let mut dir = parity_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet_keys");
},
DirectoryType::Main => {
dir.push("keys");
}
}
dir
}
pub struct ParityDirectory {
dir: DiskDirectory,
}
impl ParityDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = ParityDirectory {
dir: DiskDirectory::create(parity_keystore(t))?,
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
ParityDirectory {
dir: DiskDirectory::at(parity_keystore(t)),
}
}
}
impl KeyDirectory for ParityDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
|
random_line_split
|
parity.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
fn parity_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".parity");
home
}
fn parity_keystore(t: DirectoryType) -> PathBuf {
let mut dir = parity_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet_keys");
},
DirectoryType::Main =>
|
}
dir
}
pub struct ParityDirectory {
dir: DiskDirectory,
}
impl ParityDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = ParityDirectory {
dir: DiskDirectory::create(parity_keystore(t))?,
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
ParityDirectory {
dir: DiskDirectory::at(parity_keystore(t)),
}
}
}
impl KeyDirectory for ParityDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}
|
{
dir.push("keys");
}
|
conditional_block
|
parity.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
fn parity_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".parity");
home
}
fn parity_keystore(t: DirectoryType) -> PathBuf {
let mut dir = parity_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet_keys");
},
DirectoryType::Main => {
dir.push("keys");
}
}
dir
}
pub struct ParityDirectory {
dir: DiskDirectory,
}
impl ParityDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = ParityDirectory {
dir: DiskDirectory::create(parity_keystore(t))?,
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
ParityDirectory {
dir: DiskDirectory::at(parity_keystore(t)),
}
}
}
impl KeyDirectory for ParityDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error>
|
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}
|
{
self.dir.load()
}
|
identifier_body
|
parity.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
fn parity_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".parity");
home
}
fn parity_keystore(t: DirectoryType) -> PathBuf {
let mut dir = parity_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet_keys");
},
DirectoryType::Main => {
dir.push("keys");
}
}
dir
}
pub struct ParityDirectory {
dir: DiskDirectory,
}
impl ParityDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = ParityDirectory {
dir: DiskDirectory::create(parity_keystore(t))?,
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
ParityDirectory {
dir: DiskDirectory::at(parity_keystore(t)),
}
}
}
impl KeyDirectory for ParityDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn
|
(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}
|
remove
|
identifier_name
|
minkowski_difference_test.rs
|
extern crate quickcheck;
use TOLERANCE;
use maths::{DotProduct, Transform, UnitQuat, UnitVec3D, Vec3D};
use collisions::CollisionData;
use collisions::shapes::{Direction, SupportMap};
use collisions::shapes::convex_shapes::Cuboid;
use collisions::shapes::behaviour::support_map_behaviour;
use collisions::detection::gjkepa::MinkowskiDifference;
// TODO should this go under a behaviour for SupportMap?
#[test]
fn
|
() {
fn property(rotation: UnitQuat, random_direction: UnitVec3D) {
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform::identity(),
);
let data = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
let direction = Vec3D::from(random_direction);
let max_control = control.vertices_iter()
.max_by_key(|vertex| (vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
let max_neg_data = data.vertices_iter()
.max_by_key(|vertex| (-vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
let support_point = diff.support_points_iter(Direction::from(direction)).next().unwrap();
let support_point_distance = support_point.dot(direction);
let expected_support_point_distance = (max_control - max_neg_data).dot(direction);
assert_approx_eq!(support_point_distance, expected_support_point_distance);
}
quickcheck::quickcheck(property as fn(UnitQuat, UnitVec3D));
}
quickcheck! {
fn it_behaves_like_a_support_map(rotation: UnitQuat, input_direction: UnitVec3D) -> quickcheck::TestResult {
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform::identity(),
);
let data = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
quickcheck_expect!(support_map_behaviour(diff, input_direction));
quickcheck::TestResult::passed()
}
}
|
it_always_generates_a_valid_support_point
|
identifier_name
|
minkowski_difference_test.rs
|
extern crate quickcheck;
use TOLERANCE;
use maths::{DotProduct, Transform, UnitQuat, UnitVec3D, Vec3D};
use collisions::CollisionData;
use collisions::shapes::{Direction, SupportMap};
use collisions::shapes::convex_shapes::Cuboid;
use collisions::shapes::behaviour::support_map_behaviour;
use collisions::detection::gjkepa::MinkowskiDifference;
// TODO should this go under a behaviour for SupportMap?
#[test]
fn it_always_generates_a_valid_support_point() {
fn property(rotation: UnitQuat, random_direction: UnitVec3D)
|
let max_neg_data = data.vertices_iter()
.max_by_key(|vertex| (-vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
let support_point = diff.support_points_iter(Direction::from(direction)).next().unwrap();
let support_point_distance = support_point.dot(direction);
let expected_support_point_distance = (max_control - max_neg_data).dot(direction);
assert_approx_eq!(support_point_distance, expected_support_point_distance);
}
quickcheck::quickcheck(property as fn(UnitQuat, UnitVec3D));
}
quickcheck! {
fn it_behaves_like_a_support_map(rotation: UnitQuat, input_direction: UnitVec3D) -> quickcheck::TestResult {
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform::identity(),
);
let data = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
quickcheck_expect!(support_map_behaviour(diff, input_direction));
quickcheck::TestResult::passed()
}
}
|
{
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform::identity(),
);
let data = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
let direction = Vec3D::from(random_direction);
let max_control = control.vertices_iter()
.max_by_key(|vertex| (vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
|
identifier_body
|
minkowski_difference_test.rs
|
extern crate quickcheck;
use TOLERANCE;
use maths::{DotProduct, Transform, UnitQuat, UnitVec3D, Vec3D};
use collisions::CollisionData;
use collisions::shapes::{Direction, SupportMap};
use collisions::shapes::convex_shapes::Cuboid;
use collisions::shapes::behaviour::support_map_behaviour;
use collisions::detection::gjkepa::MinkowskiDifference;
// TODO should this go under a behaviour for SupportMap?
#[test]
fn it_always_generates_a_valid_support_point() {
fn property(rotation: UnitQuat, random_direction: UnitVec3D) {
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
|
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
let direction = Vec3D::from(random_direction);
let max_control = control.vertices_iter()
.max_by_key(|vertex| (vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
let max_neg_data = data.vertices_iter()
.max_by_key(|vertex| (-vertex.dot(direction) / TOLERANCE) as i32)
.unwrap();
let support_point = diff.support_points_iter(Direction::from(direction)).next().unwrap();
let support_point_distance = support_point.dot(direction);
let expected_support_point_distance = (max_control - max_neg_data).dot(direction);
assert_approx_eq!(support_point_distance, expected_support_point_distance);
}
quickcheck::quickcheck(property as fn(UnitQuat, UnitVec3D));
}
quickcheck! {
fn it_behaves_like_a_support_map(rotation: UnitQuat, input_direction: UnitVec3D) -> quickcheck::TestResult {
let control = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform::identity(),
);
let data = CollisionData::new(
Box::new(Cuboid::cube(1.0)),
Transform {
translation: Vec3D::zero(),
rotation: rotation.into(),
},
);
let diff = MinkowskiDifference(&control, &data);
quickcheck_expect!(support_map_behaviour(diff, input_direction));
quickcheck::TestResult::passed()
}
}
|
Transform::identity(),
);
let data = CollisionData::new(
|
random_line_split
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
|
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
}
/// A condition to expose things.
pub enum Condition {
|
random_line_split
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T>
|
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
{
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
|
identifier_body
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj)
|
else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
{
Some(self.value)
}
|
conditional_block
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs::PREFS;
/// A container with a condition.
pub struct
|
<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => PREFS.get(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
Guard
|
identifier_name
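A dependency-free sketch of the Guard/Condition pattern shown in guard.rs above: a bool and a plain fn pointer stand in for the prefs lookup and the JSContext-based predicate, so this is an illustrative analogue rather than servo's actual API.
// Simplified analogue of servo's Guard/Condition (illustrative only):
// Condition::Pref carries a precomputed bool instead of a PREFS lookup,
// and Condition::Func takes a plain fn() -> bool instead of a JSContext hook.
enum Condition {
    Func(fn() -> bool),
    Pref(bool),
    Satisfied,
}

struct Guard<T: Copy> {
    condition: Condition,
    value: T,
}

impl<T: Copy> Guard<T> {
    fn new(condition: Condition, value: T) -> Self {
        Guard { condition, value }
    }

    // Expose the guarded value only when the condition is satisfied.
    fn expose(&self) -> Option<T> {
        let satisfied = match self.condition {
            Condition::Func(f) => f(),
            Condition::Pref(enabled) => enabled,
            Condition::Satisfied => true,
        };
        if satisfied { Some(self.value) } else { None }
    }
}

fn main() {
    let by_pref = Guard::new(Condition::Pref(false), 1);
    assert_eq!(by_pref.expose(), None);
    let by_func = Guard::new(Condition::Func(|| true), 2);
    assert_eq!(by_func.expose(), Some(2));
    let always = Guard::new(Condition::Satisfied, 3);
    assert_eq!(always.expose(), Some(3));
}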
|
mod_power_of_2_is_reduced.rs
|
use malachite_base::num::arithmetic::traits::{ModIsReduced, ModPowerOf2IsReduced, PowerOf2};
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::runner::Runner;
use malachite_nz::natural::Natural;
use malachite_nz_test_util::bench::bucketers::pair_1_natural_bit_bucketer;
use malachite_nz_test_util::generators::natural_unsigned_pair_gen_var_4;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_natural_mod_power_of_2_is_reduced);
register_bench!(
runner,
benchmark_natural_mod_power_of_2_is_reduced_algorithms
);
}
fn demo_natural_mod_power_of_2_is_reduced(gm: GenMode, config: GenConfig, limit: usize)
|
fn benchmark_natural_mod_power_of_2_is_reduced_algorithms(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
"limbs_mod_power_of_2_add_limb(&[Limb], Limb, u64)",
BenchmarkType::Algorithms,
natural_unsigned_pair_gen_var_4().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_natural_bit_bucketer("n"),
&mut [
("default", &mut |(n, log_base)| {
no_out!(n.mod_power_of_2_is_reduced(log_base))
}),
("using mod_is_reduced", &mut |(n, log_base)| {
no_out!(n.mod_is_reduced(&Natural::power_of_2(log_base)))
}),
],
);
}
|
{
for (n, log_base) in natural_unsigned_pair_gen_var_4()
.get(gm, &config)
.take(limit)
{
if n.mod_power_of_2_is_reduced(log_base) {
println!("{} is reduced mod 2^{}", n, log_base);
} else {
println!("{} is not reduced mod 2^{}", n, log_base);
}
}
}
|
identifier_body
|
mod_power_of_2_is_reduced.rs
|
use malachite_base::num::arithmetic::traits::{ModIsReduced, ModPowerOf2IsReduced, PowerOf2};
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::runner::Runner;
use malachite_nz::natural::Natural;
use malachite_nz_test_util::bench::bucketers::pair_1_natural_bit_bucketer;
use malachite_nz_test_util::generators::natural_unsigned_pair_gen_var_4;
pub(crate) fn
|
(runner: &mut Runner) {
register_demo!(runner, demo_natural_mod_power_of_2_is_reduced);
register_bench!(
runner,
benchmark_natural_mod_power_of_2_is_reduced_algorithms
);
}
fn demo_natural_mod_power_of_2_is_reduced(gm: GenMode, config: GenConfig, limit: usize) {
for (n, log_base) in natural_unsigned_pair_gen_var_4()
.get(gm, &config)
.take(limit)
{
if n.mod_power_of_2_is_reduced(log_base) {
println!("{} is reduced mod 2^{}", n, log_base);
} else {
println!("{} is not reduced mod 2^{}", n, log_base);
}
}
}
fn benchmark_natural_mod_power_of_2_is_reduced_algorithms(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
"limbs_mod_power_of_2_add_limb(&[Limb], Limb, u64)",
BenchmarkType::Algorithms,
natural_unsigned_pair_gen_var_4().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_natural_bit_bucketer("n"),
&mut [
("default", &mut |(n, log_base)| {
no_out!(n.mod_power_of_2_is_reduced(log_base))
}),
("using mod_is_reduced", &mut |(n, log_base)| {
no_out!(n.mod_is_reduced(&Natural::power_of_2(log_base)))
}),
],
);
}
|
register
|
identifier_name
|
mod_power_of_2_is_reduced.rs
|
use malachite_base::num::arithmetic::traits::{ModIsReduced, ModPowerOf2IsReduced, PowerOf2};
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::runner::Runner;
|
register_demo!(runner, demo_natural_mod_power_of_2_is_reduced);
register_bench!(
runner,
benchmark_natural_mod_power_of_2_is_reduced_algorithms
);
}
fn demo_natural_mod_power_of_2_is_reduced(gm: GenMode, config: GenConfig, limit: usize) {
for (n, log_base) in natural_unsigned_pair_gen_var_4()
.get(gm, &config)
.take(limit)
{
if n.mod_power_of_2_is_reduced(log_base) {
println!("{} is reduced mod 2^{}", n, log_base);
} else {
println!("{} is not reduced mod 2^{}", n, log_base);
}
}
}
fn benchmark_natural_mod_power_of_2_is_reduced_algorithms(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
"limbs_mod_power_of_2_add_limb(&[Limb], Limb, u64)",
BenchmarkType::Algorithms,
natural_unsigned_pair_gen_var_4().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_natural_bit_bucketer("n"),
&mut [
("default", &mut |(n, log_base)| {
no_out!(n.mod_power_of_2_is_reduced(log_base))
}),
("using mod_is_reduced", &mut |(n, log_base)| {
no_out!(n.mod_is_reduced(&Natural::power_of_2(log_base)))
}),
],
);
}
|
use malachite_nz::natural::Natural;
use malachite_nz_test_util::bench::bucketers::pair_1_natural_bit_bucketer;
use malachite_nz_test_util::generators::natural_unsigned_pair_gen_var_4;
pub(crate) fn register(runner: &mut Runner) {
|
random_line_split
|
mod_power_of_2_is_reduced.rs
|
use malachite_base::num::arithmetic::traits::{ModIsReduced, ModPowerOf2IsReduced, PowerOf2};
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::runner::Runner;
use malachite_nz::natural::Natural;
use malachite_nz_test_util::bench::bucketers::pair_1_natural_bit_bucketer;
use malachite_nz_test_util::generators::natural_unsigned_pair_gen_var_4;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_natural_mod_power_of_2_is_reduced);
register_bench!(
runner,
benchmark_natural_mod_power_of_2_is_reduced_algorithms
);
}
fn demo_natural_mod_power_of_2_is_reduced(gm: GenMode, config: GenConfig, limit: usize) {
for (n, log_base) in natural_unsigned_pair_gen_var_4()
.get(gm, &config)
.take(limit)
{
if n.mod_power_of_2_is_reduced(log_base) {
println!("{} is reduced mod 2^{}", n, log_base);
} else
|
}
}
fn benchmark_natural_mod_power_of_2_is_reduced_algorithms(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,
) {
run_benchmark(
"limbs_mod_power_of_2_add_limb(&[Limb], Limb, u64)",
BenchmarkType::Algorithms,
natural_unsigned_pair_gen_var_4().get(gm, &config),
gm.name(),
limit,
file_name,
&pair_1_natural_bit_bucketer("n"),
&mut [
("default", &mut |(n, log_base)| {
no_out!(n.mod_power_of_2_is_reduced(log_base))
}),
("using mod_is_reduced", &mut |(n, log_base)| {
no_out!(n.mod_is_reduced(&Natural::power_of_2(log_base)))
}),
],
);
}
|
{
println!("{} is not reduced mod 2^{}", n, log_base);
}
|
conditional_block
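A small worked example of the property the mod_power_of_2_is_reduced demos and benchmarks above exercise, using the same malachite traits they import: a Natural n is reduced mod 2^k exactly when n < 2^k, and mod_is_reduced against Natural::power_of_2(k) performs the equivalent check.
// Worked example with the traits imported by the demo above; the concrete
// values (5, exponents 2 and 3) are chosen here purely for illustration.
extern crate malachite_base;
extern crate malachite_nz;
use malachite_base::num::arithmetic::traits::{ModIsReduced, ModPowerOf2IsReduced, PowerOf2};
use malachite_nz::natural::Natural;

fn main() {
    let n = Natural::from(5u32);
    assert!(n.mod_power_of_2_is_reduced(3));   // 5 < 2^3 = 8, so reduced
    assert!(!n.mod_power_of_2_is_reduced(2));  // 5 >= 2^2 = 4, so not reduced
    // Same check routed through the generic mod_is_reduced API.
    assert_eq!(
        n.mod_power_of_2_is_reduced(3),
        n.mod_is_reduced(&Natural::power_of_2(3))
    );
}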
|
file_reporter.rs
|
use super::{ErrorReporter, ReportKind, Message };
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::cmp;
use std::iter;
use crate::common::{
constants::SPACES_PER_TAB,
node_info::Span,
};
/*
Reports errors by printing/highlighting content from the given file
*/
pub struct FileErrorReporter {
file_path: String,
errors: i32,
messages: Vec<Message>,
}
impl FileErrorReporter {
pub fn new(file: &str) -> FileErrorReporter {
FileErrorReporter {
file_path: file.to_owned(),
errors: 0,
messages: vec![],
}
}
fn read_lines(&self) -> Vec<String> {
let mut lines = vec![];
// Todo: Replace with something slightly saner
// Works for small files, but can be expensive memory wise in case the source file is huge
let f = match File::open(&self.file_path) {
Ok(file) => file,
Err(e) => panic!("Failed to open file {}: {}", self.file_path, e),
};
let reader = BufReader::new(f);
for line in reader.lines() {
match line {
Ok(content) => lines.push(content.replace("\t", &" ".repeat(SPACES_PER_TAB as usize))),
Err(e) => panic!("IO error: {}", e),
}
}
lines
}
fn update_error_count(&mut self, error_type: &ReportKind) {
match error_type {
ReportKind::TokenError |
ReportKind::TypeError |
ReportKind::NameError |
ReportKind::SyntaxError |
ReportKind::DataFlowError => self.errors += 1,
ReportKind::Note | ReportKind::Warning => (),
}
}
}
impl ErrorReporter for FileErrorReporter {
fn report_error(&mut self, report_kind: ReportKind, span: Span, message : String) {
self.update_error_count(&report_kind);
self.messages.push(
Message::HighlightMessage {
span,
report_kind,
message
});
}
fn has_errors(&self) -> bool {
self.errors != 0
}
fn has_reports(&self) -> bool {
self.messages.len() > 0
}
fn errors(&self) -> i32 {
self.errors
}
fn reports(&self) -> i32 {
self.messages.len() as i32
}
fn print_errors(&self) {
let lines = self.read_lines();
for msg in self.messages.iter() {
match msg {
Message::HighlightMessage{
span,
report_kind,
message,
} => {
write_highlight_message(span, *report_kind, message, lines.as_slice())
}
}
}
}
fn clear_reports(&mut self) {
self.messages.clear();
}
}
fn
|
(span: &Span, report_kind: ReportKind, message: &String, lines: &[String]) {
// may be called from multiple threads, at least in e2e tests. Prevent output from being garbled
// when multiple threads attempt to print at the same time
let stdout = std::io::stdout();
let stderr = std::io::stderr();
let mut _stdouthandle = stdout.lock();
let mut handle = stderr.lock();
// group notes with the warning/error, otherwise add a newline
if report_kind != ReportKind::Note {
writeln!(&mut handle).unwrap();
}
// main error/warning/note print
writeln!(&mut handle, "{}:{} {}: {}",
span.line,
span.column,
report_kind,
message).unwrap();
// print line
if (span.line as usize) < lines.len() {
let line = &lines[(span.line - 1) as usize];
write!(&mut handle, "{}", line).unwrap();
if !line.ends_with("\n") {
writeln!(&mut handle).unwrap();
}
// indentation for highlighting line
write!(&mut handle, "{}",
iter::repeat(" ").
take(cmp::max(span.column - 1, 0) as usize).
collect::<String>()).unwrap();
// highlighting
let color = report_kind.get_color();
for _ in 0..span.length {
write!(&mut handle, "{}", color.bold().paint("^").to_string()).unwrap();
}
writeln!(&mut handle).unwrap();
}
}
|
write_highlight_message
|
identifier_name
|
file_reporter.rs
|
use super::{ErrorReporter, ReportKind, Message };
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::cmp;
use std::iter;
use crate::common::{
constants::SPACES_PER_TAB,
node_info::Span,
};
/*
Reports errors by printing/highlighting content from the given file
*/
pub struct FileErrorReporter {
file_path: String,
errors: i32,
messages: Vec<Message>,
}
impl FileErrorReporter {
pub fn new(file: &str) -> FileErrorReporter {
FileErrorReporter {
file_path: file.to_owned(),
errors: 0,
messages: vec![],
}
}
fn read_lines(&self) -> Vec<String> {
let mut lines = vec![];
// Todo: Replace with something slightly saner
// Works for small files, but can be expensive memory wise in case the source file is huge
let f = match File::open(&self.file_path) {
Ok(file) => file,
Err(e) => panic!("Failed to open file {}: {}", self.file_path, e),
};
let reader = BufReader::new(f);
for line in reader.lines() {
match line {
Ok(content) => lines.push(content.replace("\t", &" ".repeat(SPACES_PER_TAB as usize))),
Err(e) => panic!("IO error: {}", e),
}
}
lines
}
fn update_error_count(&mut self, error_type: &ReportKind) {
match error_type {
ReportKind::TokenError |
ReportKind::TypeError |
ReportKind::NameError |
ReportKind::SyntaxError |
ReportKind::DataFlowError => self.errors += 1,
ReportKind::Note | ReportKind::Warning => (),
}
}
}
impl ErrorReporter for FileErrorReporter {
fn report_error(&mut self, report_kind: ReportKind, span: Span, message : String) {
self.update_error_count(&report_kind);
self.messages.push(
Message::HighlightMessage {
span,
report_kind,
message
});
}
fn has_errors(&self) -> bool
|
fn has_reports(&self) -> bool {
self.messages.len() > 0
}
fn errors(&self) -> i32 {
self.errors
}
fn reports(&self) -> i32 {
self.messages.len() as i32
}
fn print_errors(&self) {
let lines = self.read_lines();
for msg in self.messages.iter() {
match msg {
Message::HighlightMessage{
span,
report_kind,
message,
} => {
write_highlight_message(span, *report_kind, message, lines.as_slice())
}
}
}
}
fn clear_reports(&mut self) {
self.messages.clear();
}
}
fn write_highlight_message(span: &Span, report_kind: ReportKind, message: &String, lines: &[String]) {
// may be called from multiple threads, at least in e2e tests. Prevent output from being garbled
// when multiple threads attempt to print at the same time
let stdout = std::io::stdout();
let stderr = std::io::stderr();
let mut _stdouthandle = stdout.lock();
let mut handle = stderr.lock();
// group notes with the warning/error, otherwise add a newline
if report_kind != ReportKind::Note {
writeln!(&mut handle).unwrap();
}
// main error/warning/note print
writeln!(&mut handle, "{}:{} {}: {}",
span.line,
span.column,
report_kind,
message).unwrap();
// print line
if (span.line as usize) < lines.len() {
let line = &lines[(span.line - 1) as usize];
write!(&mut handle, "{}", line).unwrap();
if !line.ends_with("\n") {
writeln!(&mut handle).unwrap();
}
// indentation for highlighting line
write!(&mut handle, "{}",
iter::repeat(" ").
take(cmp::max(span.column - 1, 0) as usize).
collect::<String>()).unwrap();
// highlighting
let color = report_kind.get_color();
for _ in 0..span.length {
write!(&mut handle, "{}", color.bold().paint("^").to_string()).unwrap();
}
writeln!(&mut handle).unwrap();
}
}
|
{
self.errors != 0
}
|
identifier_body
|
file_reporter.rs
|
use super::{ErrorReporter, ReportKind, Message };
use std::fs::File;
use std::io::{BufRead, BufReader, Write};
use std::cmp;
use std::iter;
use crate::common::{
constants::SPACES_PER_TAB,
node_info::Span,
};
/*
Reports errors by printing/highlighting content from the given file
*/
pub struct FileErrorReporter {
file_path: String,
errors: i32,
messages: Vec<Message>,
}
impl FileErrorReporter {
pub fn new(file: &str) -> FileErrorReporter {
FileErrorReporter {
file_path: file.to_owned(),
errors: 0,
messages: vec![],
}
}
fn read_lines(&self) -> Vec<String> {
let mut lines = vec![];
// Todo: Replace with something slightly saner
// Works for small files, but can be expensive memory wise in case the source file is huge
let f = match File::open(&self.file_path) {
Ok(file) => file,
Err(e) => panic!("Failed to open file {}: {}", self.file_path, e),
};
let reader = BufReader::new(f);
for line in reader.lines() {
match line {
Ok(content) => lines.push(content.replace("\t", &" ".repeat(SPACES_PER_TAB as usize))),
Err(e) => panic!("IO error: {}", e),
}
}
lines
}
fn update_error_count(&mut self, error_type: &ReportKind) {
match error_type {
ReportKind::TokenError |
ReportKind::TypeError |
ReportKind::NameError |
ReportKind::SyntaxError |
ReportKind::DataFlowError => self.errors += 1,
ReportKind::Note | ReportKind::Warning => (),
}
}
}
impl ErrorReporter for FileErrorReporter {
fn report_error(&mut self, report_kind: ReportKind, span: Span, message : String) {
self.update_error_count(&report_kind);
self.messages.push(
Message::HighlightMessage {
span,
report_kind,
message
});
}
fn has_errors(&self) -> bool {
self.errors != 0
}
fn has_reports(&self) -> bool {
self.messages.len() > 0
}
fn errors(&self) -> i32 {
self.errors
}
fn reports(&self) -> i32 {
self.messages.len() as i32
}
fn print_errors(&self) {
let lines = self.read_lines();
for msg in self.messages.iter() {
match msg {
Message::HighlightMessage{
span,
report_kind,
|
}
}
fn clear_reports(&mut self) {
self.messages.clear();
}
}
fn write_highlight_message(span: &Span, report_kind: ReportKind, message: &String, lines: &[String]) {
// may be called from multiple threads, at least in e2e tests. Prevent output from being garbled
// when multiple threads attempt to print at the same time
let stdout = std::io::stdout();
let stderr = std::io::stderr();
let mut _stdouthandle = stdout.lock();
let mut handle = stderr.lock();
// group notes with the warning/error, otherwise add a newline
if report_kind != ReportKind::Note {
writeln!(&mut handle).unwrap();
}
// main error/warning/note print
writeln!(&mut handle, "{}:{} {}: {}",
span.line,
span.column,
report_kind,
message).unwrap();
// print line
if (span.line as usize) < lines.len() {
let line = &lines[(span.line - 1) as usize];
write!(&mut handle, "{}", line).unwrap();
if !line.ends_with("\n") {
writeln!(&mut handle).unwrap();
}
// indentation for highlighting line
write!(&mut handle, "{}",
iter::repeat(" ").
take(cmp::max(span.column - 1, 0) as usize).
collect::<String>()).unwrap();
// highlighting
let color = report_kind.get_color();
for _ in 0..span.length {
write!(&mut handle, "{}", color.bold().paint("^").to_string()).unwrap();
}
writeln!(&mut handle).unwrap();
}
}
|
message,
} => {
write_highlight_message(span, *report_kind, message, lines.as_slice())
}
}
|
random_line_split
|
unique-object-noncopyable.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo {
fn f(&self);
}
struct Bar {
x: int,
}
impl Drop for Bar {
fn drop(&mut self) {}
}
impl Foo for Bar {
fn f(&self)
|
}
fn main() {
let x = ~Bar { x: 10 };
let y: ~Foo = x as ~Foo;
let _z = y.clone(); //~ ERROR does not implement any method in scope
}
|
{
println!("hi");
}
|
identifier_body
|
unique-object-noncopyable.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo {
fn f(&self);
}
struct Bar {
x: int,
}
impl Drop for Bar {
fn
|
(&mut self) {}
}
impl Foo for Bar {
fn f(&self) {
println!("hi");
}
}
fn main() {
let x = ~Bar { x: 10 };
let y: ~Foo = x as ~Foo;
let _z = y.clone(); //~ ERROR does not implement any method in scope
}
|
drop
|
identifier_name
|
unique-object-noncopyable.rs
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo {
fn f(&self);
}
struct Bar {
x: int,
}
impl Drop for Bar {
fn drop(&mut self) {}
}
impl Foo for Bar {
fn f(&self) {
println!("hi");
}
}
fn main() {
let x = ~Bar { x: 10 };
let y: ~Foo = x as ~Foo;
let _z = y.clone(); //~ ERROR does not implement any method in scope
}
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
|
main.rs
|
extern crate rustc_serialize;
extern crate docopt;
#[macro_use]
extern crate lust;
use std::io::{self, Write, Read};
use std::path::Path;
use std::fs::{File, metadata};
use docopt::Docopt;
use lust::Parser;
use lust::State;
macro_rules! println_error {
($err:expr) => (println!("Whoops, error detected.\n{}.\n\
Please, try again...", $err))
}
macro_rules! try_ok {
($e:expr) => ({
match $e {
Ok(res) => {
res
},
Err(err) => {
return println_error!(err)
}
}
})
}
static USAGE: &'static str = "
Usage:
lust [options] [<expr>]
Options:
-f <file_path>, --file <file_path> Evaluate expressions from file
-i, --interactive Run REPL session
";
#[derive(RustcDecodable, Debug)]
struct CliArgs {
arg_expr: Option<String>,
flag_file: Option<String>,
flag_interactive: bool,
}
#[cfg_attr(test, allow(dead_code))]
fn
|
() {
let args = try_ok!(Docopt::new(USAGE).and_then(|d| d.decode::<CliArgs>()));
let ref mut state = State::new("user".to_string());
let mut last_evaled = None;
if let Some(ref flag_file) = args.flag_file {
let path = Path::new(flag_file);
let md = metadata(path);
if is_file_exists!(md) {
if is_file!(md) {
let mut file = try_ok!(File::open(&path));
let ref mut buf = String::new();
try_ok!(file.read_to_string(buf));
for parsed_expr in Parser::new(buf.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))));
}
} else {
return println_error!("Specified path is not a file.");
}
} else {
return println_error!("File doesn't exist.");
}
}
if let Some(ref arg_expr) = args.arg_expr {
for parsed_expr in Parser::new(arg_expr.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))))
}
}
if args.flag_interactive {
let stdin = io::stdin();
let mut stdout = io::stdout();
loop {
print!("-> ");
stdout.flush().ok();
let ref mut buf = String::new();
if try_ok!(stdin.read_line(buf)) > 0 {
for expr in Parser::new(buf.chars()) {
match expr {
Ok(parsed_expr) => {
match state.eval(&parsed_expr) {
Ok(res) => {
println!("{}", res);
},
Err(err) => {
println_error!(err);
}
}
},
Err(err) => {
println_error!(err);
},
}
}
} else {
return println!("\nHope you enjoyed.\nSee you...");
}
}
} else if let Some(ref expr) = last_evaled {
println!("{}", expr);
}
}
|
main
|
identifier_name
|
main.rs
|
extern crate rustc_serialize;
extern crate docopt;
#[macro_use]
extern crate lust;
use std::io::{self, Write, Read};
use std::path::Path;
use std::fs::{File, metadata};
use docopt::Docopt;
use lust::Parser;
use lust::State;
macro_rules! println_error {
($err:expr) => (println!("Whoops, error detected.\n{}.\n\
Please, try again...", $err))
}
macro_rules! try_ok {
($e:expr) => ({
match $e {
Ok(res) => {
res
},
Err(err) => {
return println_error!(err)
}
}
})
}
static USAGE: &'static str = "
Usage:
lust [options] [<expr>]
Options:
-f <file_path>, --file <file_path> Evaluate expressions from file
-i, --interactive Run REPL session
";
#[derive(RustcDecodable, Debug)]
struct CliArgs {
arg_expr: Option<String>,
flag_file: Option<String>,
flag_interactive: bool,
}
#[cfg_attr(test, allow(dead_code))]
fn main() {
let args = try_ok!(Docopt::new(USAGE).and_then(|d| d.decode::<CliArgs>()));
let ref mut state = State::new("user".to_string());
let mut last_evaled = None;
if let Some(ref flag_file) = args.flag_file {
let path = Path::new(flag_file);
let md = metadata(path);
if is_file_exists!(md) {
if is_file!(md) {
let mut file = try_ok!(File::open(&path));
let ref mut buf = String::new();
try_ok!(file.read_to_string(buf));
for parsed_expr in Parser::new(buf.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))));
}
} else {
return println_error!("Specified path is not a file.");
}
} else {
return println_error!("File doesn't exist.");
}
}
if let Some(ref arg_expr) = args.arg_expr
|
if args.flag_interactive {
let stdin = io::stdin();
let mut stdout = io::stdout();
loop {
print!("-> ");
stdout.flush().ok();
let ref mut buf = String::new();
if try_ok!(stdin.read_line(buf)) > 0 {
for expr in Parser::new(buf.chars()) {
match expr {
Ok(parsed_expr) => {
match state.eval(&parsed_expr) {
Ok(res) => {
println!("{}", res);
},
Err(err) => {
println_error!(err);
}
}
},
Err(err) => {
println_error!(err);
},
}
}
} else {
return println!("\nHope you enjoyed.\nSee you...");
}
}
} else if let Some(ref expr) = last_evaled {
println!("{}", expr);
}
}
|
{
for parsed_expr in Parser::new(arg_expr.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))))
}
}
|
conditional_block
|
main.rs
|
extern crate rustc_serialize;
extern crate docopt;
#[macro_use]
extern crate lust;
use std::io::{self, Write, Read};
use std::path::Path;
use std::fs::{File, metadata};
use docopt::Docopt;
use lust::Parser;
use lust::State;
macro_rules! println_error {
($err:expr) => (println!("Whoops, error detected.\n{}.\n\
Please, try again...", $err))
}
macro_rules! try_ok {
($e:expr) => ({
match $e {
Ok(res) => {
res
},
Err(err) => {
return println_error!(err)
}
}
})
}
static USAGE: &'static str = "
Usage:
lust [options] [<expr>]
Options:
-f <file_path>, --file <file_path> Evaluate expressions from file
-i, --interactive Run REPL session
";
#[derive(RustcDecodable, Debug)]
struct CliArgs {
arg_expr: Option<String>,
flag_file: Option<String>,
flag_interactive: bool,
}
#[cfg_attr(test, allow(dead_code))]
fn main()
|
return println_error!("File doesn't exist.");
}
}
if let Some(ref arg_expr) = args.arg_expr {
for parsed_expr in Parser::new(arg_expr.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))))
}
}
if args.flag_interactive {
let stdin = io::stdin();
let mut stdout = io::stdout();
loop {
print!("-> ");
stdout.flush().ok();
let ref mut buf = String::new();
if try_ok!(stdin.read_line(buf)) > 0 {
for expr in Parser::new(buf.chars()) {
match expr {
Ok(parsed_expr) => {
match state.eval(&parsed_expr) {
Ok(res) => {
println!("{}", res);
},
Err(err) => {
println_error!(err);
}
}
},
Err(err) => {
println_error!(err);
},
}
}
} else {
return println!("\nHope you enjoyed.\nSee you...");
}
}
} else if let Some(ref expr) = last_evaled {
println!("{}", expr);
}
}
|
{
let args = try_ok!(Docopt::new(USAGE).and_then(|d| d.decode::<CliArgs>()));
let ref mut state = State::new("user".to_string());
let mut last_evaled = None;
if let Some(ref flag_file) = args.flag_file {
let path = Path::new(flag_file);
let md = metadata(path);
if is_file_exists!(md) {
if is_file!(md) {
let mut file = try_ok!(File::open(&path));
let ref mut buf = String::new();
try_ok!(file.read_to_string(buf));
for parsed_expr in Parser::new(buf.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))));
}
} else {
return println_error!("Specified path is not a file.");
}
} else {
|
identifier_body
|
main.rs
|
extern crate rustc_serialize;
extern crate docopt;
#[macro_use]
extern crate lust;
use std::io::{self, Write, Read};
use std::path::Path;
use std::fs::{File, metadata};
use docopt::Docopt;
use lust::Parser;
use lust::State;
macro_rules! println_error {
($err:expr) => (println!("Whoops, error detected.\n{}.\n\
Please, try again...", $err))
}
macro_rules! try_ok {
($e:expr) => ({
match $e {
Ok(res) => {
res
},
Err(err) => {
return println_error!(err)
}
}
})
}
static USAGE: &'static str = "
Usage:
lust [options] [<expr>]
Options:
-f <file_path>, --file <file_path> Evaluate expressions from file
-i, --interactive Run REPL session
";
#[derive(RustcDecodable, Debug)]
struct CliArgs {
arg_expr: Option<String>,
flag_file: Option<String>,
flag_interactive: bool,
}
#[cfg_attr(test, allow(dead_code))]
fn main() {
|
let ref mut state = State::new("user".to_string());
let mut last_evaled = None;
if let Some(ref flag_file) = args.flag_file {
let path = Path::new(flag_file);
let md = metadata(path);
if is_file_exists!(md) {
if is_file!(md) {
let mut file = try_ok!(File::open(&path));
let ref mut buf = String::new();
try_ok!(file.read_to_string(buf));
for parsed_expr in Parser::new(buf.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))));
}
} else {
return println_error!("Specified path is not a file.");
}
} else {
return println_error!("File doesn't exist.");
}
}
if let Some(ref arg_expr) = args.arg_expr {
for parsed_expr in Parser::new(arg_expr.chars()) {
last_evaled = Some(try_ok!(state.eval(&try_ok!(parsed_expr))))
}
}
if args.flag_interactive {
let stdin = io::stdin();
let mut stdout = io::stdout();
loop {
print!("-> ");
stdout.flush().ok();
let ref mut buf = String::new();
if try_ok!(stdin.read_line(buf)) > 0 {
for expr in Parser::new(buf.chars()) {
match expr {
Ok(parsed_expr) => {
match state.eval(&parsed_expr) {
Ok(res) => {
println!("{}", res);
},
Err(err) => {
println_error!(err);
}
}
},
Err(err) => {
println_error!(err);
},
}
}
} else {
return println!("\nHope you enjoyed.\nSee you...");
}
}
} else if let Some(ref expr) = last_evaled {
println!("{}", expr);
}
}
|
let args = try_ok!(Docopt::new(USAGE).and_then(|d| d.decode::<CliArgs>()));
|
random_line_split
|
main.rs
|
#![feature(rustc_private)]
extern crate rustc;
extern crate rustc_back;
extern crate rustc_metadata;
extern crate rustc_llvm;
extern crate flate;
extern crate syntax_pos;
extern crate serialize as rustc_serialize;
#[macro_use] extern crate log;
mod sectionreader;
use rustc_serialize::{Encodable, Encoder, Decodable};
use rustc_back::target::{Target, TargetResult, TargetOptions};
use rustc::middle::lang_items;
//use rustc_metadata::locator::get_metadata_section;
use sectionreader::{get_metadata_section, CrateFlavor};
use rustc::hir;
use rustc::hir::def::{self, CtorKind};
use rustc::hir::def_id::{DefIndex, DefId};
use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind};
use rustc_metadata::cstore::MetadataBlob;
use rustc_metadata::schema::LazySeq;
use rustc_metadata::schema::CrateDep;
use rustc_metadata::schema::MacroDef;
use rustc_metadata::schema::TraitImpls;
use rustc_metadata::index;
use rustc_back::PanicStrategy;
use std::path::Path;
use std::default::Default;
use std::env;
use rustc_serialize::json::{self};
use rustc_serialize::json::ToJson;
pub fn opts() -> TargetOptions {
TargetOptions {
function_sections: true,
linker: "link.exe".to_string(),
ar: "llvm-ar.exe".to_string(),
|
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
is_like_windows: true,
is_like_msvc: true,
pre_link_args: vec![
"/NOLOGO".to_string(),
"/NXCOMPAT".to_string(),
],
exe_allocation_crate: "alloc_system".to_string(),
.. Default::default()
}
}
fn target() -> TargetResult {
let mut base = opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "windows".to_string(),
target_env: "msvc".to_string(),
target_vendor: "pc".to_string(),
options: base,
})
}
//#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable)]
pub struct KrateRut
{
pub rustc_version: String,
pub name: String,
pub triple: String,
pub hash: u64,
pub disambiguator: String,
pub panic_strategy: PanicStrategy,
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,
dependencies : Vec<CrateDep>,
dylib_dependency_formats : Vec<Option<LinkagePreference>>,
lang_items : Vec<(DefIndex, usize)>,
lang_items_missing: Vec<lang_items::LangItem>,
native_libraries: Vec<(NativeLibraryKind, String)>,
codemap: Vec<syntax_pos::FileMap>,
macro_defs: Vec<MacroDef>,
// impls: Vec<TraitImpls>,
reachable_ids: Vec<DefIndex>,
// index: Vec<index::Index>
/*
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,*/
}
fn deserialize<T : Decodable>(inp : LazySeq<T>, meta : &MetadataBlob) -> Vec<T>
{
let mut res = Vec::new();
for (_, r) in inp.decode(meta).enumerate() {
res.push(r);
};
res
}
impl KrateRut
{
pub fn new(metadata_blob : MetadataBlob) -> KrateRut
{
let mut strins = Vec::new();
let crate_root = metadata_blob.get_root();
for (_, dep) in crate_root.crate_deps.decode(&metadata_blob).enumerate() {
strins.push(format!("{}",dep.name));
};
KrateRut
{
name : crate_root.name,
rustc_version : crate_root.rustc_version,
triple : crate_root.triple,
hash : crate_root.hash.as_u64(),
disambiguator : crate_root.disambiguator,
panic_strategy: crate_root.panic_strategy,
plugin_registrar_fn : crate_root.plugin_registrar_fn,
macro_derive_registrar : crate_root.macro_derive_registrar,
dependencies : deserialize(crate_root.crate_deps, &metadata_blob),
dylib_dependency_formats : deserialize(crate_root.dylib_dependency_formats, &metadata_blob),
lang_items : deserialize(crate_root.lang_items, &metadata_blob),
lang_items_missing : deserialize(crate_root.lang_items_missing, &metadata_blob),
native_libraries: deserialize(crate_root.native_libraries, &metadata_blob),
codemap: deserialize(crate_root.codemap, &metadata_blob),
macro_defs: deserialize(crate_root.macro_defs, &metadata_blob),
//impls: deserialize(crate_root.impls, &metadata_blob),
reachable_ids: deserialize(crate_root.reachable_ids, &metadata_blob),
// index: deserialize(crate_root.index, &metadata_blob),
}
}
}
//impl Encodable for hir::svh::Svh {
// fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
// s.emit_u64(self.as_u64().to_le())
// }
//}
fn d0(path : String)
{
let crate_path = Path::new(&path);
let flavor = CrateFlavor::Dylib;
let target = target().unwrap();
let metadata_blob = get_metadata_section(&target, flavor, crate_path).unwrap();
let krate_rut = KrateRut::new(metadata_blob);
let encoded = json::encode(&krate_rut).unwrap();
//let crate_root = metadata_blob.get_root();
//let encoded2 = json::encode(&crate_root).unwrap();
println!("{}",encoded);
}
fn main() {
    let strategy = rustc_back::PanicStrategy::Unwind;
    strategy.to_json();
match env::args().nth(1)
{
Some(arg) => d0(arg),
_ => panic!("no")
}
}
|
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
|
random_line_split
|
main.rs
|
#![feature(rustc_private)]
extern crate rustc;
extern crate rustc_back;
extern crate rustc_metadata;
extern crate rustc_llvm;
extern crate flate;
extern crate syntax_pos;
extern crate serialize as rustc_serialize;
#[macro_use] extern crate log;
mod sectionreader;
use rustc_serialize::{Encodable, Encoder, Decodable};
use rustc_back::target::{Target, TargetResult, TargetOptions};
use rustc::middle::lang_items;
//use rustc_metadata::locator::get_metadata_section;
use sectionreader::{get_metadata_section, CrateFlavor};
use rustc::hir;
use rustc::hir::def::{self, CtorKind};
use rustc::hir::def_id::{DefIndex, DefId};
use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind};
use rustc_metadata::cstore::MetadataBlob;
use rustc_metadata::schema::LazySeq;
use rustc_metadata::schema::CrateDep;
use rustc_metadata::schema::MacroDef;
use rustc_metadata::schema::TraitImpls;
use rustc_metadata::index;
use rustc_back::PanicStrategy;
use std::path::Path;
use std::default::Default;
use std::env;
use rustc_serialize::json::{self};
use rustc_serialize::json::ToJson;
pub fn opts() -> TargetOptions {
TargetOptions {
function_sections: true,
linker: "link.exe".to_string(),
ar: "llvm-ar.exe".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
is_like_windows: true,
is_like_msvc: true,
pre_link_args: vec![
"/NOLOGO".to_string(),
"/NXCOMPAT".to_string(),
],
exe_allocation_crate: "alloc_system".to_string(),
.. Default::default()
}
}
fn target() -> TargetResult {
let mut base = opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "windows".to_string(),
target_env: "msvc".to_string(),
target_vendor: "pc".to_string(),
options: base,
})
}
//#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable)]
pub struct KrateRut
{
pub rustc_version: String,
pub name: String,
pub triple: String,
pub hash: u64,
pub disambiguator: String,
pub panic_strategy: PanicStrategy,
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,
dependencies : Vec<CrateDep>,
dylib_dependency_formats : Vec<Option<LinkagePreference>>,
lang_items : Vec<(DefIndex, usize)>,
lang_items_missing: Vec<lang_items::LangItem>,
native_libraries: Vec<(NativeLibraryKind, String)>,
codemap: Vec<syntax_pos::FileMap>,
macro_defs: Vec<MacroDef>,
// impls: Vec<TraitImpls>,
reachable_ids: Vec<DefIndex>,
// index: Vec<index::Index>
/*
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,*/
}
fn deserialize<T : Decodable>(inp : LazySeq<T>, meta : &MetadataBlob) -> Vec<T>
{
let mut res = Vec::new();
for (_, r) in inp.decode(meta).enumerate() {
res.push(r);
};
res
}
impl KrateRut
{
pub fn
|
(metadata_blob : MetadataBlob) -> KrateRut
{
let mut strins = Vec::new();
let crate_root = metadata_blob.get_root();
for (_, dep) in crate_root.crate_deps.decode(&metadata_blob).enumerate() {
strins.push(format!("{}",dep.name));
};
KrateRut
{
name : crate_root.name,
rustc_version : crate_root.rustc_version,
triple : crate_root.triple,
hash : crate_root.hash.as_u64(),
disambiguator : crate_root.disambiguator,
panic_strategy: crate_root.panic_strategy,
plugin_registrar_fn : crate_root.plugin_registrar_fn,
macro_derive_registrar : crate_root.macro_derive_registrar,
dependencies : deserialize(crate_root.crate_deps, &metadata_blob),
dylib_dependency_formats : deserialize(crate_root.dylib_dependency_formats, &metadata_blob),
lang_items : deserialize(crate_root.lang_items, &metadata_blob),
lang_items_missing : deserialize(crate_root.lang_items_missing, &metadata_blob),
native_libraries: deserialize(crate_root.native_libraries, &metadata_blob),
codemap: deserialize(crate_root.codemap, &metadata_blob),
macro_defs: deserialize(crate_root.macro_defs, &metadata_blob),
//impls: deserialize(crate_root.impls, &metadata_blob),
reachable_ids: deserialize(crate_root.reachable_ids, &metadata_blob),
// index: deserialize(crate_root.index, &metadata_blob),
}
}
}
//impl Encodable for hir::svh::Svh {
// fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
// s.emit_u64(self.as_u64().to_le())
// }
//}
fn d0(path : String)
{
let crate_path = Path::new(&path);
let flavor = CrateFlavor::Dylib;
let target = target().unwrap();
let metadata_blob = get_metadata_section(&target, flavor, crate_path).unwrap();
let krate_rut = KrateRut::new(metadata_blob);
let encoded = json::encode(&krate_rut).unwrap();
//let crate_root = metadata_blob.get_root();
//let encoded2 = json::encode(&crate_root).unwrap();
println!("{}",encoded);
}
fn main() {
    let strategy = rustc_back::PanicStrategy::Unwind;
    strategy.to_json();
match env::args().nth(1)
{
Some(arg) => d0(arg),
_ => panic!("no")
}
}
|
new
|
identifier_name
|
main.rs
|
#![feature(rustc_private)]
extern crate rustc;
extern crate rustc_back;
extern crate rustc_metadata;
extern crate rustc_llvm;
extern crate flate;
extern crate syntax_pos;
extern crate serialize as rustc_serialize;
#[macro_use] extern crate log;
mod sectionreader;
use rustc_serialize::{Encodable, Encoder, Decodable};
use rustc_back::target::{Target, TargetResult, TargetOptions};
use rustc::middle::lang_items;
//use rustc_metadata::locator::get_metadata_section;
use sectionreader::{get_metadata_section, CrateFlavor};
use rustc::hir;
use rustc::hir::def::{self, CtorKind};
use rustc::hir::def_id::{DefIndex, DefId};
use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind};
use rustc_metadata::cstore::MetadataBlob;
use rustc_metadata::schema::LazySeq;
use rustc_metadata::schema::CrateDep;
use rustc_metadata::schema::MacroDef;
use rustc_metadata::schema::TraitImpls;
use rustc_metadata::index;
use rustc_back::PanicStrategy;
use std::path::Path;
use std::default::Default;
use std::env;
use rustc_serialize::json::{self};
use rustc_serialize::json::ToJson;
pub fn opts() -> TargetOptions
|
.. Default::default()
}
}
fn target() -> TargetResult {
let mut base = opts();
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(),
arch: "x86_64".to_string(),
target_os: "windows".to_string(),
target_env: "msvc".to_string(),
target_vendor: "pc".to_string(),
options: base,
})
}
//#[derive(RustcEncodable, RustcDecodable)]
#[derive(RustcEncodable)]
pub struct KrateRut
{
pub rustc_version: String,
pub name: String,
pub triple: String,
pub hash: u64,
pub disambiguator: String,
pub panic_strategy: PanicStrategy,
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,
dependencies : Vec<CrateDep>,
dylib_dependency_formats : Vec<Option<LinkagePreference>>,
lang_items : Vec<(DefIndex, usize)>,
lang_items_missing: Vec<lang_items::LangItem>,
native_libraries: Vec<(NativeLibraryKind, String)>,
codemap: Vec<syntax_pos::FileMap>,
macro_defs: Vec<MacroDef>,
// impls: Vec<TraitImpls>,
reachable_ids: Vec<DefIndex>,
// index: Vec<index::Index>
/*
pub plugin_registrar_fn: Option<DefIndex>,
pub macro_derive_registrar: Option<DefIndex>,*/
}
fn deserialize<T : Decodable>(inp : LazySeq<T>, meta : &MetadataBlob) -> Vec<T>
{
let mut res = Vec::new();
for (_, r) in inp.decode(meta).enumerate() {
res.push(r);
};
res
}
impl KrateRut
{
pub fn new(metadata_blob : MetadataBlob) -> KrateRut
{
let mut strins = Vec::new();
let crate_root = metadata_blob.get_root();
for (_, dep) in crate_root.crate_deps.decode(&metadata_blob).enumerate() {
strins.push(format!("{}",dep.name));
};
KrateRut
{
name : crate_root.name,
rustc_version : crate_root.rustc_version,
triple : crate_root.triple,
hash : crate_root.hash.as_u64(),
disambiguator : crate_root.disambiguator,
panic_strategy: crate_root.panic_strategy,
plugin_registrar_fn : crate_root.plugin_registrar_fn,
macro_derive_registrar : crate_root.macro_derive_registrar,
dependencies : deserialize(crate_root.crate_deps, &metadata_blob),
dylib_dependency_formats : deserialize(crate_root.dylib_dependency_formats, &metadata_blob),
lang_items : deserialize(crate_root.lang_items, &metadata_blob),
lang_items_missing : deserialize(crate_root.lang_items_missing, &metadata_blob),
native_libraries: deserialize(crate_root.native_libraries, &metadata_blob),
codemap: deserialize(crate_root.codemap, &metadata_blob),
macro_defs: deserialize(crate_root.macro_defs, &metadata_blob),
//impls: deserialize(crate_root.impls, &metadata_blob),
reachable_ids: deserialize(crate_root.reachable_ids, &metadata_blob),
// index: deserialize(crate_root.index, &metadata_blob),
}
}
}
//impl Encodable for hir::svh::Svh {
// fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
// s.emit_u64(self.as_u64().to_le())
// }
//}
fn d0(path : String)
{
let crate_path = Path::new(&path);
let flavor = CrateFlavor::Dylib;
let target = target().unwrap();
let metadata_blob = get_metadata_section(&target, flavor, crate_path).unwrap();
let krate_rut = KrateRut::new(metadata_blob);
let encoded = json::encode(&krate_rut).unwrap();
//let crate_root = metadata_blob.get_root();
//let encoded2 = json::encode(&crate_root).unwrap();
println!("{}",encoded);
}
fn main() {
    let strategy = rustc_back::PanicStrategy::Unwind;
    strategy.to_json();
match env::args().nth(1)
{
Some(arg) => d0(arg),
_ => panic!("no")
}
}
|
{
TargetOptions {
function_sections: true,
linker: "link.exe".to_string(),
ar: "llvm-ar.exe".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
is_like_windows: true,
is_like_msvc: true,
pre_link_args: vec![
"/NOLOGO".to_string(),
"/NXCOMPAT".to_string(),
],
exe_allocation_crate: "alloc_system".to_string(),
|
identifier_body
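Note on the `deserialize` helper that appears in the main.rs records above: it walks the decoding iterator by hand and pushes every item into a `Vec`, discarding the index produced by `enumerate()`. As a hedged, std-only sketch (illustrative names, not part of the original crate), that collection step reduces to a single `collect`:
// Std-only sketch of the collection pattern used by `deserialize` above.
// `collect_decoded` is an illustrative name, not an API of the crate.
fn collect_decoded<T>(items: impl Iterator<Item = T>) -> Vec<T> {
    // Equivalent to: let mut res = Vec::new(); for item in items { res.push(item); } res
    items.collect()
}
fn main() {
    let decoded = collect_decoded(vec![1u32, 2, 3].into_iter());
    assert_eq!(decoded, vec![1, 2, 3]);
}
Since the loop index is never used, `collect()` is a drop-in equivalent of the manual push loop.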
|
dropout.rs
|
use prelude::*;
use kernels::ffi::*;
use densearray::prelude::*;
use operator::prelude::*;
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
//use rand::distributions::{IndependentSample};
//use rand::distributions::range::{Range};
use std::cell::{RefCell};
use std::cmp::{max};
use std::rc::{Rc};
#[derive(Clone, Debug)]
pub struct DropoutOperatorConfig {
pub batch_sz: usize,
pub dim: usize,
pub drop_frac: f32,
}
pub struct DropoutOperator<S, IoBuf:?Sized> {
cfg: DropoutOperatorConfig,
node: OperatorNode,
in_op: Rc<RefCell<DiffOperator<S, IoBuf>>>,
in_: CommonOutput,
out: CommonOutput,
mask: Vec<f32>,
rng: Xorshiftplus128Rng,
//dist: Range<f32>,
}
impl<S, IoBuf:?Sized> DropoutOperator<S, IoBuf> {
pub fn new<InOp>(cfg: DropoutOperatorConfig, cap: OpCapability, prev_op: Rc<RefCell<InOp>>, prev_arm: usize) -> Rc<RefCell<DropoutOperator<S, IoBuf>>> where InOp:'static + CommonOperator + DiffOperator<S, IoBuf> {
let in_ = prev_op.borrow()._output(prev_arm);
let out = CommonOutput::new(cfg.batch_sz, cfg.dim, cap);
let mut mask = Vec::with_capacity(cfg.batch_sz * cfg.dim);
mask.resize(cfg.batch_sz * cfg.dim, 0.0);
Rc::new(RefCell::new(DropoutOperator{
cfg: cfg,
node: OperatorNode::default(),
in_op: prev_op,
in_: in_,
out: out,
mask: mask,
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
//dist: Range::new(0.0, 1.0),
}))
}
}
impl<S, IoBuf:?Sized> Operator for DropoutOperator<S, IoBuf> {
fn _next(&self) -> u64 {
self.node._next()
}
}
impl<S, IoBuf:?Sized> CommonOperator for DropoutOperator<S, IoBuf> {
fn _output(&self, arm: usize) -> CommonOutput {
assert_eq!(0, arm);
self.out.clone()
}
}
impl<S, IoBuf:?Sized> DiffOperatorData<S> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperatorIo<IoBuf> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperator<S, IoBuf> for DropoutOperator<S, IoBuf> {
fn _traverse_fwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
self.node.push(epoch);
assert!(self.node.limit(1));
self.in_op.borrow_mut()._traverse_fwd(epoch, apply);
apply(self);
self.node.pop(epoch);
}
fn _traverse_bwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
|
self.node.pop(epoch);
}
fn _forward(&mut self, phase: OpPhase) {
let batch_size = self.in_.batch_sz.get();
assert!(batch_size <= self.cfg.batch_sz);
self.out.batch_sz.set(batch_size);
match phase {
OpPhase::Inference => {
self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim]
.copy_from_slice(&self.in_.buf.borrow()[.. batch_size * self.cfg.dim]);
}
OpPhase::Learning => {
let in_buf = &self.in_.buf.borrow()[.. batch_size * self.cfg.dim];
let out_buf = &mut self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
let u: f32 = self.rng.gen();
if u < self.cfg.drop_frac {
self.mask[p] = 0.0;
out_buf[p] = 0.0;
} else {
self.mask[p] = 1.0;
out_buf[p] = in_buf[p];
}
}
}
}
}
fn _backward(&mut self) {
let batch_size = self.out.batch_sz.get();
if let Some(in_grad) = self.in_.grad.as_ref() {
let in_grad = &mut in_grad.borrow_mut()[.. batch_size * self.cfg.dim];
let out_grad = &self.out.grad.as_ref().unwrap().borrow()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
in_grad[p] = self.mask[p] * out_grad[p];
}
}
}
}
|
self.node.push(epoch);
assert!(self.node.limit(1));
apply(self);
self.in_op.borrow_mut()._traverse_bwd(epoch, apply);
|
random_line_split
|
dropout.rs
|
use prelude::*;
use kernels::ffi::*;
use densearray::prelude::*;
use operator::prelude::*;
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
//use rand::distributions::{IndependentSample};
//use rand::distributions::range::{Range};
use std::cell::{RefCell};
use std::cmp::{max};
use std::rc::{Rc};
#[derive(Clone, Debug)]
pub struct DropoutOperatorConfig {
pub batch_sz: usize,
pub dim: usize,
pub drop_frac: f32,
}
pub struct DropoutOperator<S, IoBuf:?Sized> {
cfg: DropoutOperatorConfig,
node: OperatorNode,
in_op: Rc<RefCell<DiffOperator<S, IoBuf>>>,
in_: CommonOutput,
out: CommonOutput,
mask: Vec<f32>,
rng: Xorshiftplus128Rng,
//dist: Range<f32>,
}
impl<S, IoBuf:?Sized> DropoutOperator<S, IoBuf> {
pub fn new<InOp>(cfg: DropoutOperatorConfig, cap: OpCapability, prev_op: Rc<RefCell<InOp>>, prev_arm: usize) -> Rc<RefCell<DropoutOperator<S, IoBuf>>> where InOp:'static + CommonOperator + DiffOperator<S, IoBuf> {
let in_ = prev_op.borrow()._output(prev_arm);
let out = CommonOutput::new(cfg.batch_sz, cfg.dim, cap);
let mut mask = Vec::with_capacity(cfg.batch_sz * cfg.dim);
mask.resize(cfg.batch_sz * cfg.dim, 0.0);
Rc::new(RefCell::new(DropoutOperator{
cfg: cfg,
node: OperatorNode::default(),
in_op: prev_op,
in_: in_,
out: out,
mask: mask,
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
//dist: Range::new(0.0, 1.0),
}))
}
}
impl<S, IoBuf:?Sized> Operator for DropoutOperator<S, IoBuf> {
fn _next(&self) -> u64 {
self.node._next()
}
}
impl<S, IoBuf:?Sized> CommonOperator for DropoutOperator<S, IoBuf> {
fn _output(&self, arm: usize) -> CommonOutput {
assert_eq!(0, arm);
self.out.clone()
}
}
impl<S, IoBuf:?Sized> DiffOperatorData<S> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperatorIo<IoBuf> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperator<S, IoBuf> for DropoutOperator<S, IoBuf> {
fn _traverse_fwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
self.node.push(epoch);
assert!(self.node.limit(1));
self.in_op.borrow_mut()._traverse_fwd(epoch, apply);
apply(self);
self.node.pop(epoch);
}
fn _traverse_bwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
self.node.push(epoch);
assert!(self.node.limit(1));
apply(self);
self.in_op.borrow_mut()._traverse_bwd(epoch, apply);
self.node.pop(epoch);
}
fn _forward(&mut self, phase: OpPhase) {
let batch_size = self.in_.batch_sz.get();
assert!(batch_size <= self.cfg.batch_sz);
self.out.batch_sz.set(batch_size);
match phase {
OpPhase::Inference => {
self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim]
.copy_from_slice(&self.in_.buf.borrow()[.. batch_size * self.cfg.dim]);
}
OpPhase::Learning => {
let in_buf = &self.in_.buf.borrow()[.. batch_size * self.cfg.dim];
let out_buf = &mut self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
let u: f32 = self.rng.gen();
if u < self.cfg.drop_frac {
self.mask[p] = 0.0;
out_buf[p] = 0.0;
} else {
self.mask[p] = 1.0;
out_buf[p] = in_buf[p];
}
}
}
}
}
fn
|
(&mut self) {
let batch_size = self.out.batch_sz.get();
if let Some(in_grad) = self.in_.grad.as_ref() {
let in_grad = &mut in_grad.borrow_mut()[.. batch_size * self.cfg.dim];
let out_grad = &self.out.grad.as_ref().unwrap().borrow()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
in_grad[p] = self.mask[p] * out_grad[p];
}
}
}
}
|
_backward
|
identifier_name
|
dropout.rs
|
use prelude::*;
use kernels::ffi::*;
use densearray::prelude::*;
use operator::prelude::*;
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
//use rand::distributions::{IndependentSample};
//use rand::distributions::range::{Range};
use std::cell::{RefCell};
use std::cmp::{max};
use std::rc::{Rc};
#[derive(Clone, Debug)]
pub struct DropoutOperatorConfig {
pub batch_sz: usize,
pub dim: usize,
pub drop_frac: f32,
}
pub struct DropoutOperator<S, IoBuf:?Sized> {
cfg: DropoutOperatorConfig,
node: OperatorNode,
in_op: Rc<RefCell<DiffOperator<S, IoBuf>>>,
in_: CommonOutput,
out: CommonOutput,
mask: Vec<f32>,
rng: Xorshiftplus128Rng,
//dist: Range<f32>,
}
impl<S, IoBuf:?Sized> DropoutOperator<S, IoBuf> {
pub fn new<InOp>(cfg: DropoutOperatorConfig, cap: OpCapability, prev_op: Rc<RefCell<InOp>>, prev_arm: usize) -> Rc<RefCell<DropoutOperator<S, IoBuf>>> where InOp:'static + CommonOperator + DiffOperator<S, IoBuf>
|
}
impl<S, IoBuf:?Sized> Operator for DropoutOperator<S, IoBuf> {
fn _next(&self) -> u64 {
self.node._next()
}
}
impl<S, IoBuf:?Sized> CommonOperator for DropoutOperator<S, IoBuf> {
fn _output(&self, arm: usize) -> CommonOutput {
assert_eq!(0, arm);
self.out.clone()
}
}
impl<S, IoBuf:?Sized> DiffOperatorData<S> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperatorIo<IoBuf> for DropoutOperator<S, IoBuf> {
}
impl<S, IoBuf:?Sized> DiffOperator<S, IoBuf> for DropoutOperator<S, IoBuf> {
fn _traverse_fwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
self.node.push(epoch);
assert!(self.node.limit(1));
self.in_op.borrow_mut()._traverse_fwd(epoch, apply);
apply(self);
self.node.pop(epoch);
}
fn _traverse_bwd(&mut self, epoch: u64, apply: &mut FnMut(&mut DiffOperator<S, IoBuf>)) {
self.node.push(epoch);
assert!(self.node.limit(1));
apply(self);
self.in_op.borrow_mut()._traverse_bwd(epoch, apply);
self.node.pop(epoch);
}
fn _forward(&mut self, phase: OpPhase) {
let batch_size = self.in_.batch_sz.get();
assert!(batch_size <= self.cfg.batch_sz);
self.out.batch_sz.set(batch_size);
match phase {
OpPhase::Inference => {
self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim]
.copy_from_slice(&self.in_.buf.borrow()[.. batch_size * self.cfg.dim]);
}
OpPhase::Learning => {
let in_buf = &self.in_.buf.borrow()[.. batch_size * self.cfg.dim];
let out_buf = &mut self.out.buf.borrow_mut()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
let u: f32 = self.rng.gen();
if u < self.cfg.drop_frac {
self.mask[p] = 0.0;
out_buf[p] = 0.0;
} else {
self.mask[p] = 1.0;
out_buf[p] = in_buf[p];
}
}
}
}
}
fn _backward(&mut self) {
let batch_size = self.out.batch_sz.get();
if let Some(in_grad) = self.in_.grad.as_ref() {
let in_grad = &mut in_grad.borrow_mut()[.. batch_size * self.cfg.dim];
let out_grad = &self.out.grad.as_ref().unwrap().borrow()[.. batch_size * self.cfg.dim];
for p in 0.. batch_size * self.cfg.dim {
in_grad[p] = self.mask[p] * out_grad[p];
}
}
}
}
|
{
let in_ = prev_op.borrow()._output(prev_arm);
let out = CommonOutput::new(cfg.batch_sz, cfg.dim, cap);
let mut mask = Vec::with_capacity(cfg.batch_sz * cfg.dim);
mask.resize(cfg.batch_sz * cfg.dim, 0.0);
Rc::new(RefCell::new(DropoutOperator{
cfg: cfg,
node: OperatorNode::default(),
in_op: prev_op,
in_: in_,
out: out,
mask: mask,
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
//dist: Range::new(0.0, 1.0),
}))
}
|
identifier_body
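A minimal sketch of the masking loop from `_forward` in the dropout.rs records above, written against plain slices with a caller-supplied uniform sampler so it has no crate dependencies (names are illustrative). As in the operator above, surviving activations are left unscaled; the common "inverted dropout" variant would instead divide them by `1.0 - drop_frac` so that inference needs no correction. The backward pass is then just the elementwise product of the saved mask with the output gradient, as `_backward` shows.
// Hedged, crate-free sketch of the dropout masking step (not the operator's own code).
fn dropout_forward(
    input: &[f32],
    output: &mut [f32],
    mask: &mut [f32],
    drop_frac: f32,
    mut uniform: impl FnMut() -> f32, // should return samples in [0, 1)
) {
    for i in 0..input.len() {
        if uniform() < drop_frac {
            mask[i] = 0.0;
            output[i] = 0.0;
        } else {
            mask[i] = 1.0;
            output[i] = input[i];
        }
    }
}
fn main() {
    let input = [1.0_f32, 2.0, 3.0, 4.0];
    let mut output = [0.0_f32; 4];
    let mut mask = [0.0_f32; 4];
    // Deterministic "sampler" for the example: drops every other element.
    let mut flip = false;
    dropout_forward(&input, &mut output, &mut mask, 0.5, move || {
        flip = !flip;
        if flip { 0.25 } else { 0.75 }
    });
    assert_eq!(output, [0.0, 2.0, 0.0, 4.0]);
}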
|
meta.rs
|
use error::ErrorKind::BadMetadataSyntax;
use error::{InFile, Result};
use name::WadName;
use regex::Regex;
use rustc_serialize::{Decodable, Encodable};
use std::fs::File;
use std::io::Read;
use std::path::Path;
use toml::{Decoder, Parser, Value};
use types::ThingType;
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct SkyMetadata {
pub texture_name: WadName,
pub level_pattern: String,
pub tiled_band_size: f32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct AnimationMetadata {
pub flats: Vec<Vec<WadName>>,
pub walls: Vec<Vec<WadName>>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingMetadata {
pub thing_type: ThingType,
pub sprite: String,
pub sequence: String,
pub hanging: bool,
pub radius: u32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingDirectoryMetadata {
pub decorations: Vec<ThingMetadata>,
pub weapons: Vec<ThingMetadata>,
pub powerups: Vec<ThingMetadata>,
pub artifacts: Vec<ThingMetadata>,
pub ammo: Vec<ThingMetadata>,
pub keys: Vec<ThingMetadata>,
pub monsters: Vec<ThingMetadata>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct WadMetadata {
pub sky: Vec<SkyMetadata>,
pub animations: AnimationMetadata,
pub things: ThingDirectoryMetadata,
}
impl WadMetadata {
pub fn from_file<P: AsRef<Path>>(path: &P) -> Result<WadMetadata> {
let mut contents = String::new();
let path = path.as_ref();
try!(try!(File::open(path)).read_to_string(&mut contents));
WadMetadata::from_text(&contents).in_file(path)
}
pub fn from_text(text: &str) -> Result<WadMetadata> {
let mut parser = Parser::new(text);
parser.parse()
.ok_or_else(move || BadMetadataSyntax(parser.errors).into())
.and_then(|value| {
Decodable::decode(&mut Decoder::new(Value::Table(value))).map_err(|e| e.into())
})
}
pub fn sky_for(&self, name: &WadName) -> Option<&SkyMetadata> {
self.sky
.iter()
.find(|sky| {
Regex::new(&sky.level_pattern)
.map(|r| r.is_match(name.as_ref()))
.unwrap_or_else(|_| {
warn!("Invalid level pattern {} for sky {}.",
sky.level_pattern,
sky.texture_name);
false
})
})
.or_else(|| {
if let Some(sky) = self.sky.get(0) {
warn!("No sky found for level {}, using {}.",
name,
sky.texture_name);
Some(sky)
} else {
error!("No sky metadata provided.");
None
}
})
}
pub fn find_thing(&self, thing_type: ThingType) -> Option<&ThingMetadata> {
self.things
.decorations
.iter()
.find(|t| t.thing_type == thing_type)
.or_else(|| self.things.weapons.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.powerups.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.artifacts.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.ammo.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.keys.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.monsters.iter().find(|t| t.thing_type == thing_type))
}
}
#[cfg(test)]
mod test {
use super::WadMetadata;
#[test]
fn
|
() {
WadMetadata::from_text(r#"
[[sky]]
level_pattern = "MAP(0[1-9]|10|11)"
texture_name = "SKY1"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(1[2-9]|20)"
texture_name = "SKY2"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(2[1-9]|32)"
texture_name = "SKY3"
tiled_band_size = 0.15
[animations]
flats = [
["NUKAGE1", "NUKAGE2", "NUKAGE3"],
[],
]
walls = [
[],
["DBRAIN1", "DBRAIN2", "DBRAIN3", "DBRAIN4"],
]
[things]
[[things.decoration]]
thing_type = 10
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
[[things.decoration]]
thing_type = 12
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
"#)
.ok()
.expect("test: could not parse test metadata");
}
}
|
test_wad_metadata
|
identifier_name
|
meta.rs
|
use error::ErrorKind::BadMetadataSyntax;
use error::{InFile, Result};
use name::WadName;
use regex::Regex;
use rustc_serialize::{Decodable, Encodable};
use std::fs::File;
use std::io::Read;
use std::path::Path;
use toml::{Decoder, Parser, Value};
use types::ThingType;
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct SkyMetadata {
pub texture_name: WadName,
pub level_pattern: String,
pub tiled_band_size: f32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct AnimationMetadata {
pub flats: Vec<Vec<WadName>>,
pub walls: Vec<Vec<WadName>>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingMetadata {
pub thing_type: ThingType,
pub sprite: String,
pub sequence: String,
pub hanging: bool,
pub radius: u32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingDirectoryMetadata {
pub decorations: Vec<ThingMetadata>,
pub weapons: Vec<ThingMetadata>,
pub powerups: Vec<ThingMetadata>,
pub artifacts: Vec<ThingMetadata>,
pub ammo: Vec<ThingMetadata>,
pub keys: Vec<ThingMetadata>,
pub monsters: Vec<ThingMetadata>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct WadMetadata {
pub sky: Vec<SkyMetadata>,
pub animations: AnimationMetadata,
pub things: ThingDirectoryMetadata,
}
impl WadMetadata {
pub fn from_file<P: AsRef<Path>>(path: &P) -> Result<WadMetadata> {
let mut contents = String::new();
let path = path.as_ref();
try!(try!(File::open(path)).read_to_string(&mut contents));
WadMetadata::from_text(&contents).in_file(path)
}
pub fn from_text(text: &str) -> Result<WadMetadata> {
let mut parser = Parser::new(text);
parser.parse()
.ok_or_else(move || BadMetadataSyntax(parser.errors).into())
.and_then(|value| {
Decodable::decode(&mut Decoder::new(Value::Table(value))).map_err(|e| e.into())
})
}
pub fn sky_for(&self, name: &WadName) -> Option<&SkyMetadata> {
self.sky
.iter()
.find(|sky| {
Regex::new(&sky.level_pattern)
.map(|r| r.is_match(name.as_ref()))
.unwrap_or_else(|_| {
warn!("Invalid level pattern {} for sky {}.",
sky.level_pattern,
sky.texture_name);
false
})
})
.or_else(|| {
if let Some(sky) = self.sky.get(0) {
warn!("No sky found for level {}, using {}.",
name,
sky.texture_name);
Some(sky)
} else
|
})
}
pub fn find_thing(&self, thing_type: ThingType) -> Option<&ThingMetadata> {
self.things
.decorations
.iter()
.find(|t| t.thing_type == thing_type)
.or_else(|| self.things.weapons.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.powerups.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.artifacts.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.ammo.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.keys.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.monsters.iter().find(|t| t.thing_type == thing_type))
}
}
#[cfg(test)]
mod test {
use super::WadMetadata;
#[test]
fn test_wad_metadata() {
WadMetadata::from_text(r#"
[[sky]]
level_pattern = "MAP(0[1-9]|10|11)"
texture_name = "SKY1"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(1[2-9]|20)"
texture_name = "SKY2"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(2[1-9]|32)"
texture_name = "SKY3"
tiled_band_size = 0.15
[animations]
flats = [
["NUKAGE1", "NUKAGE2", "NUKAGE3"],
[],
]
walls = [
[],
["DBRAIN1", "DBRAIN2", "DBRAIN3", "DBRAIN4"],
]
[things]
[[things.decoration]]
thing_type = 10
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
[[things.decoration]]
thing_type = 12
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
"#)
.ok()
.expect("test: could not parse test metadata");
}
}
|
{
error!("No sky metadata provided.");
None
}
|
conditional_block
|
meta.rs
|
use error::ErrorKind::BadMetadataSyntax;
use error::{InFile, Result};
use name::WadName;
use regex::Regex;
use rustc_serialize::{Decodable, Encodable};
use std::fs::File;
use std::io::Read;
use std::path::Path;
use toml::{Decoder, Parser, Value};
use types::ThingType;
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct SkyMetadata {
pub texture_name: WadName,
pub level_pattern: String,
pub tiled_band_size: f32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct AnimationMetadata {
pub flats: Vec<Vec<WadName>>,
pub walls: Vec<Vec<WadName>>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingMetadata {
pub thing_type: ThingType,
pub sprite: String,
pub sequence: String,
pub hanging: bool,
pub radius: u32,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct ThingDirectoryMetadata {
pub decorations: Vec<ThingMetadata>,
pub weapons: Vec<ThingMetadata>,
pub powerups: Vec<ThingMetadata>,
pub artifacts: Vec<ThingMetadata>,
pub ammo: Vec<ThingMetadata>,
pub keys: Vec<ThingMetadata>,
pub monsters: Vec<ThingMetadata>,
}
#[derive(Debug, RustcDecodable, RustcEncodable)]
pub struct WadMetadata {
pub sky: Vec<SkyMetadata>,
pub animations: AnimationMetadata,
pub things: ThingDirectoryMetadata,
}
impl WadMetadata {
pub fn from_file<P: AsRef<Path>>(path: &P) -> Result<WadMetadata> {
let mut contents = String::new();
let path = path.as_ref();
try!(try!(File::open(path)).read_to_string(&mut contents));
WadMetadata::from_text(&contents).in_file(path)
}
pub fn from_text(text: &str) -> Result<WadMetadata> {
let mut parser = Parser::new(text);
parser.parse()
.ok_or_else(move || BadMetadataSyntax(parser.errors).into())
.and_then(|value| {
Decodable::decode(&mut Decoder::new(Value::Table(value))).map_err(|e| e.into())
|
pub fn sky_for(&self, name: &WadName) -> Option<&SkyMetadata> {
self.sky
.iter()
.find(|sky| {
Regex::new(&sky.level_pattern)
.map(|r| r.is_match(name.as_ref()))
.unwrap_or_else(|_| {
warn!("Invalid level pattern {} for sky {}.",
sky.level_pattern,
sky.texture_name);
false
})
})
.or_else(|| {
if let Some(sky) = self.sky.get(0) {
warn!("No sky found for level {}, using {}.",
name,
sky.texture_name);
Some(sky)
} else {
error!("No sky metadata provided.");
None
}
})
}
pub fn find_thing(&self, thing_type: ThingType) -> Option<&ThingMetadata> {
self.things
.decorations
.iter()
.find(|t| t.thing_type == thing_type)
.or_else(|| self.things.weapons.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.powerups.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.artifacts.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.ammo.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.keys.iter().find(|t| t.thing_type == thing_type))
.or_else(|| self.things.monsters.iter().find(|t| t.thing_type == thing_type))
}
}
#[cfg(test)]
mod test {
use super::WadMetadata;
#[test]
fn test_wad_metadata() {
WadMetadata::from_text(r#"
[[sky]]
level_pattern = "MAP(0[1-9]|10|11)"
texture_name = "SKY1"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(1[2-9]|20)"
texture_name = "SKY2"
tiled_band_size = 0.15
[[sky]]
level_pattern = "MAP(2[1-9]|32)"
texture_name = "SKY3"
tiled_band_size = 0.15
[animations]
flats = [
["NUKAGE1", "NUKAGE2", "NUKAGE3"],
[],
]
walls = [
[],
["DBRAIN1", "DBRAIN2", "DBRAIN3", "DBRAIN4"],
]
[things]
[[things.decoration]]
thing_type = 10
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
[[things.decoration]]
thing_type = 12
sprite = "PLAY"
sequence = "W"
obstacle = false
hanging = false
"#)
.ok()
.expect("test: could not parse test metadata");
}
}
|
})
}
|
random_line_split
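A small, hedged illustration of the matching that `sky_for` performs in the meta.rs records above: the `level_pattern` string from the metadata is compiled with the `regex` crate the module already imports and tested against a WAD level name. The pattern and level names below come from the test metadata in those records; the snippet is illustrative, not part of the crate.
// Hedged illustration of `sky_for`'s pattern matching, using the `regex` crate.
use regex::Regex;
fn main() {
    let level_pattern = "MAP(0[1-9]|10|11)"; // SKY1's pattern in the test TOML
    let re = Regex::new(level_pattern).expect("pattern compiles");
    assert!(re.is_match("MAP07"));  // covered by 0[1-9]
    assert!(!re.is_match("MAP15")); // MAP15 falls under SKY2's pattern instead
}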
|
path_pass.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Records the full path to items
use astsrv;
use doc::ItemUtils;
use doc;
use fold::Fold;
use fold;
use pass::Pass;
#[cfg(test)] use extract;
use syntax::ast;
pub fn mk_pass() -> Pass {
Pass {
name: ~"path",
f: run
}
}
struct Ctxt {
srv: astsrv::Srv,
path: @mut ~[~str]
}
impl Clone for Ctxt {
fn clone(&self) -> Ctxt {
Ctxt {
srv: self.srv.clone(),
path: @mut copy *self.path
}
}
}
#[allow(non_implicitly_copyable_typarams)]
fn run(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
let ctxt = Ctxt {
srv: srv,
path: @mut ~[]
};
let fold = Fold {
ctxt: ctxt.clone(),
fold_item: fold_item,
fold_mod: fold_mod,
fold_nmod: fold_nmod,
.. fold::default_any_fold(ctxt)
};
(fold.fold_doc)(&fold, doc)
}
fn fold_item(fold: &fold::Fold<Ctxt>, doc: doc::ItemDoc) -> doc::ItemDoc {
doc::ItemDoc {
path: copy *fold.ctxt.path,
.. doc
}
}
#[allow(non_implicitly_copyable_typarams)]
fn fold_mod(fold: &fold::Fold<Ctxt>, doc: doc::ModDoc) -> doc::ModDoc {
let is_topmod = doc.id() == ast::crate_node_id;
if!is_topmod { fold.ctxt.path.push(doc.name()); }
let doc = fold::default_any_fold_mod(fold, doc);
if!is_topmod
|
doc::ModDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
fn fold_nmod(fold: &fold::Fold<Ctxt>, doc: doc::NmodDoc) -> doc::NmodDoc {
fold.ctxt.path.push(doc.name());
let doc = fold::default_seq_fold_nmod(fold, doc);
fold.ctxt.path.pop();
doc::NmodDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
#[test]
fn should_record_mod_paths() {
let source = ~"mod a { mod b { mod c { } } mod d { mod e { } } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert!(doc.cratemod().mods()[0].mods()[0].mods()[0].path()
== ~[~"a", ~"b"]);
assert!(doc.cratemod().mods()[0].mods()[1].mods()[0].path()
== ~[~"a", ~"d"]);
}
}
#[test]
fn should_record_fn_paths() {
let source = ~"mod a { fn b() { } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert_eq!(doc.cratemod().mods()[0].fns()[0].path(), ~[~"a"]);
}
}
|
{ fold.ctxt.path.pop(); }
|
conditional_block
|
path_pass.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Records the full path to items
use astsrv;
use doc::ItemUtils;
use doc;
use fold::Fold;
use fold;
use pass::Pass;
#[cfg(test)] use extract;
use syntax::ast;
pub fn
|
() -> Pass {
Pass {
name: ~"path",
f: run
}
}
struct Ctxt {
srv: astsrv::Srv,
path: @mut ~[~str]
}
impl Clone for Ctxt {
fn clone(&self) -> Ctxt {
Ctxt {
srv: self.srv.clone(),
path: @mut copy *self.path
}
}
}
#[allow(non_implicitly_copyable_typarams)]
fn run(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
let ctxt = Ctxt {
srv: srv,
path: @mut ~[]
};
let fold = Fold {
ctxt: ctxt.clone(),
fold_item: fold_item,
fold_mod: fold_mod,
fold_nmod: fold_nmod,
.. fold::default_any_fold(ctxt)
};
(fold.fold_doc)(&fold, doc)
}
fn fold_item(fold: &fold::Fold<Ctxt>, doc: doc::ItemDoc) -> doc::ItemDoc {
doc::ItemDoc {
path: copy *fold.ctxt.path,
.. doc
}
}
#[allow(non_implicitly_copyable_typarams)]
fn fold_mod(fold: &fold::Fold<Ctxt>, doc: doc::ModDoc) -> doc::ModDoc {
let is_topmod = doc.id() == ast::crate_node_id;
if!is_topmod { fold.ctxt.path.push(doc.name()); }
let doc = fold::default_any_fold_mod(fold, doc);
if!is_topmod { fold.ctxt.path.pop(); }
doc::ModDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
fn fold_nmod(fold: &fold::Fold<Ctxt>, doc: doc::NmodDoc) -> doc::NmodDoc {
fold.ctxt.path.push(doc.name());
let doc = fold::default_seq_fold_nmod(fold, doc);
fold.ctxt.path.pop();
doc::NmodDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
#[test]
fn should_record_mod_paths() {
let source = ~"mod a { mod b { mod c { } } mod d { mod e { } } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert!(doc.cratemod().mods()[0].mods()[0].mods()[0].path()
== ~[~"a", ~"b"]);
assert!(doc.cratemod().mods()[0].mods()[1].mods()[0].path()
== ~[~"a", ~"d"]);
}
}
#[test]
fn should_record_fn_paths() {
let source = ~"mod a { fn b() { } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert_eq!(doc.cratemod().mods()[0].fns()[0].path(), ~[~"a"]);
}
}
|
mk_pass
|
identifier_name
|
path_pass.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Records the full path to items
use astsrv;
use doc::ItemUtils;
use doc;
use fold::Fold;
use fold;
use pass::Pass;
|
Pass {
name: ~"path",
f: run
}
}
struct Ctxt {
srv: astsrv::Srv,
path: @mut ~[~str]
}
impl Clone for Ctxt {
fn clone(&self) -> Ctxt {
Ctxt {
srv: self.srv.clone(),
path: @mut copy *self.path
}
}
}
#[allow(non_implicitly_copyable_typarams)]
fn run(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
let ctxt = Ctxt {
srv: srv,
path: @mut ~[]
};
let fold = Fold {
ctxt: ctxt.clone(),
fold_item: fold_item,
fold_mod: fold_mod,
fold_nmod: fold_nmod,
.. fold::default_any_fold(ctxt)
};
(fold.fold_doc)(&fold, doc)
}
fn fold_item(fold: &fold::Fold<Ctxt>, doc: doc::ItemDoc) -> doc::ItemDoc {
doc::ItemDoc {
path: copy *fold.ctxt.path,
.. doc
}
}
#[allow(non_implicitly_copyable_typarams)]
fn fold_mod(fold: &fold::Fold<Ctxt>, doc: doc::ModDoc) -> doc::ModDoc {
let is_topmod = doc.id() == ast::crate_node_id;
if!is_topmod { fold.ctxt.path.push(doc.name()); }
let doc = fold::default_any_fold_mod(fold, doc);
if!is_topmod { fold.ctxt.path.pop(); }
doc::ModDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
fn fold_nmod(fold: &fold::Fold<Ctxt>, doc: doc::NmodDoc) -> doc::NmodDoc {
fold.ctxt.path.push(doc.name());
let doc = fold::default_seq_fold_nmod(fold, doc);
fold.ctxt.path.pop();
doc::NmodDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
#[test]
fn should_record_mod_paths() {
let source = ~"mod a { mod b { mod c { } } mod d { mod e { } } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert!(doc.cratemod().mods()[0].mods()[0].mods()[0].path()
== ~[~"a", ~"b"]);
assert!(doc.cratemod().mods()[0].mods()[1].mods()[0].path()
== ~[~"a", ~"d"]);
}
}
#[test]
fn should_record_fn_paths() {
let source = ~"mod a { fn b() { } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert_eq!(doc.cratemod().mods()[0].fns()[0].path(), ~[~"a"]);
}
}
|
#[cfg(test)] use extract;
use syntax::ast;
pub fn mk_pass() -> Pass {
|
random_line_split
|
path_pass.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Records the full path to items
use astsrv;
use doc::ItemUtils;
use doc;
use fold::Fold;
use fold;
use pass::Pass;
#[cfg(test)] use extract;
use syntax::ast;
pub fn mk_pass() -> Pass {
Pass {
name: ~"path",
f: run
}
}
struct Ctxt {
srv: astsrv::Srv,
path: @mut ~[~str]
}
impl Clone for Ctxt {
fn clone(&self) -> Ctxt
|
}
#[allow(non_implicitly_copyable_typarams)]
fn run(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
let ctxt = Ctxt {
srv: srv,
path: @mut ~[]
};
let fold = Fold {
ctxt: ctxt.clone(),
fold_item: fold_item,
fold_mod: fold_mod,
fold_nmod: fold_nmod,
.. fold::default_any_fold(ctxt)
};
(fold.fold_doc)(&fold, doc)
}
fn fold_item(fold: &fold::Fold<Ctxt>, doc: doc::ItemDoc) -> doc::ItemDoc {
doc::ItemDoc {
path: copy *fold.ctxt.path,
.. doc
}
}
#[allow(non_implicitly_copyable_typarams)]
fn fold_mod(fold: &fold::Fold<Ctxt>, doc: doc::ModDoc) -> doc::ModDoc {
let is_topmod = doc.id() == ast::crate_node_id;
if!is_topmod { fold.ctxt.path.push(doc.name()); }
let doc = fold::default_any_fold_mod(fold, doc);
if!is_topmod { fold.ctxt.path.pop(); }
doc::ModDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
fn fold_nmod(fold: &fold::Fold<Ctxt>, doc: doc::NmodDoc) -> doc::NmodDoc {
fold.ctxt.path.push(doc.name());
let doc = fold::default_seq_fold_nmod(fold, doc);
fold.ctxt.path.pop();
doc::NmodDoc {
item: (fold.fold_item)(fold, copy doc.item),
.. doc
}
}
#[test]
fn should_record_mod_paths() {
let source = ~"mod a { mod b { mod c { } } mod d { mod e { } } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert!(doc.cratemod().mods()[0].mods()[0].mods()[0].path()
== ~[~"a", ~"b"]);
assert!(doc.cratemod().mods()[0].mods()[1].mods()[0].path()
== ~[~"a", ~"d"]);
}
}
#[test]
fn should_record_fn_paths() {
let source = ~"mod a { fn b() { } }";
do astsrv::from_str(source) |srv| {
let doc = extract::from_srv(srv.clone(), ~"");
let doc = run(srv.clone(), doc);
assert_eq!(doc.cratemod().mods()[0].fns()[0].path(), ~[~"a"]);
}
}
|
{
Ctxt {
srv: self.srv.clone(),
path: @mut copy *self.path
}
}
|
identifier_body
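The path_pass.rs records above are pre-1.0 Rust (`~str`, `@mut`, `copy`, `do` blocks) and will not compile on a modern toolchain. As a hedged sketch with illustrative names only, the same push-descend-pop path tracking looks like this in current Rust:
// Modern-Rust sketch of the shared path stack used by the fold above (illustrative only).
use std::cell::RefCell;
use std::rc::Rc;
#[derive(Clone)]
struct Ctxt {
    path: Rc<RefCell<Vec<String>>>,
}
impl Ctxt {
    // Push a module name, run the nested fold, pop afterwards.
    fn with_pushed<R>(&self, name: &str, descend: impl FnOnce() -> R) -> R {
        self.path.borrow_mut().push(name.to_string());
        let result = descend();
        self.path.borrow_mut().pop();
        result
    }
}
fn main() {
    let ctxt = Ctxt { path: Rc::new(RefCell::new(Vec::new())) };
    ctxt.with_pushed("a", || {
        assert_eq!(*ctxt.path.borrow(), vec!["a".to_string()]);
    });
    assert!(ctxt.path.borrow().is_empty());
}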
|
star.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//! Stars
/**
Computes the combined magnitude of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn combined_mag(m1: f64, m2: f64) -> f64
{
    m2 - 2.5 * (brightness_ratio(m1, m2) + 1.0).log10()
}
/**
Computes the combined magnitude of two or more stars
# Arguments
* `m`: Array of magnitudes of stars
**/
pub fn combined_mag_of_many(m: &[f64]) -> f64
{
let mut sum = 0.0;
for i in m.iter() {
sum += 10_f64.powf(-0.4 * i);
}
-2.5 * sum.log10()
}
/**
Computes the brightness ratio of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn brightness_ratio(m1: f64, m2: f64) -> f64
{
10.0_f64.powf(0.4 * (m2 - m1))
}
/**
Computes the difference in magnitude of two stars
# Arguments
* `br`: Brightness ratio of two stars
**/
#[inline]
pub fn mag_diff(br: f64) -> f64
{
2.5 * br.log10()
}
/**
Computes the absolute magnitude of a star from its parallax
# Arguments
* `par`: Parallax of the star
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_parallax(par: f64, am: f64) -> f64
{
am + 5.0 + 5.0*(par.to_degrees() * 3600.0).log10()
}
/**
Computes the absolute magnitude of a star from its distance from earth
# Arguments
* `d`: The star's distance to earth *(parsecs)*
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_dist(d: f64, am: f64) -> f64
{
am + 5.0 - 5.0*d.log10()
}
/**
Computes the angle between a vector from a star to the
north celestial pole of the Earth and a vector from the
same star to the north pole of the ecliptic
# Returns
* `angle`: The desired angle *| in radians*
# Arguments
* `eclip_long`: The star's ecliptical longitude *| in radians*
* `eclip_lat`: The star's ecliptical latitude *| in radians*
* `oblq_eclip`: Obliquity of the ecliptic *| in radians*
**/
#[inline]
pub fn angl_between_north_celes_and_eclip_pole(eclip_long: f64,
eclip_lat: f64,
oblq_eclip: f64) -> f64
{
(eclip_long.cos() * oblq_eclip.tan()).atan2 (
eclip_lat.sin() * eclip_long.sin() * oblq_eclip.tan()
- eclip_lat.cos()
)
}
/**
Computes the equatorial coordinates of a star
at a different time from its motion in space
This function computes the equatorial coordinates
of a star at a different time by taking into account
its proper motion, distance and radial velocity.
# Returns
|
* `new_asc`: Right ascension at the different
time *| in radians*
* `new_dec`: Declination at the different
time *| in radians*
# Arguments
* `asc0`: Right ascension of the star initially *| in radians*
* `dec0`: Declination of the star initially *| in radians*
* `r`: Distance of the star (*parsecs*)
* `delta_r`: Radial velocity of the star (*parsecs/second*)
* `proper_motion_asc`: Proper motion of the star in right ascension
*| in radians*
* `proper_motion_dec`: Proper motion of the star in declination
*| in radians*
* `t`: Decimal years from the initial time; negative in the past
and positive in the future
**/
pub fn eq_coords_frm_motion(asc0: f64,
dec0: f64,
r: f64,
delta_r: f64,
proper_motion_asc: f64,
proper_motion_dec: f64,
t: f64) -> (f64, f64)
{
let x = r * dec0.cos() * asc0.cos();
let y = r * dec0.cos() * asc0.sin();
let z = r * dec0.sin();
let delta_asc = 3600.0 * proper_motion_asc.to_degrees()/13751.0;
let delta_dec = 3600.0 * proper_motion_dec.to_degrees()/206265.0;
let delta_x = (x / r)*delta_r - z*delta_dec*asc0.cos() - y*delta_asc;
let delta_y = (y / r)*delta_r - z*delta_dec*asc0.sin() + x*delta_asc;
let delta_z = (z / r)*delta_r + r*delta_dec*dec0.cos();
let x1 = x + t*delta_x;
let y1 = y + t*delta_y;
let z1 = z + t*delta_z;
let asc = y1.atan2(x1);
let dec = z1.atan2((x1*x1 + y1*y1).sqrt());
(asc, dec)
}
pub fn proper_motion_in_eq_coords(asc: f64,
dec: f64,
pmotion_asc: f64,
pmotion_dec: f64,
ecl_lat: f64,
oblq_eclip: f64) -> (f64, f64)
{
let ecl_lat_cos = ecl_lat.cos();
let pmotion_long = (
pmotion_dec * oblq_eclip.sin() * asc.cos()
+ pmotion_asc * dec.cos() * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
) / (ecl_lat_cos * ecl_lat_cos);
let pmotion_lat = (
pmotion_dec * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
- pmotion_asc * oblq_eclip.sin() * asc.cos() * dec.cos()
) / ecl_lat_cos;
(pmotion_long, pmotion_lat)
}
|
`(new_asc, new_dec)`
|
random_line_split
|
star.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//! Stars
/**
Computes the combined magnitude of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn combined_mag(m1: f64, m2: f64) -> f64
{
    m2 - 2.5 * (brightness_ratio(m1, m2) + 1.0).log10()
}
/**
Computes the combined magnitude of two or more stars
# Arguments
* `m`: Array of magnitudes of stars
**/
pub fn combined_mag_of_many(m: &[f64]) -> f64
{
let mut sum = 0.0;
for i in m.iter() {
sum += 10_f64.powf(-0.4 * i);
}
-2.5 * sum.log10()
}
/**
Computes the brightness ratio of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn brightness_ratio(m1: f64, m2: f64) -> f64
{
10.0_f64.powf(0.4 * (m2 - m1))
}
/**
Computes the difference in magnitude of two stars
# Arguments
* `br`: Brightness ratio of two stars
**/
#[inline]
pub fn mag_diff(br: f64) -> f64
{
2.5 * br.log10()
}
/**
Computes the absolute magnitude of a star from its parallax
# Arguments
* `par`: Parallax of the star
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_parallax(par: f64, am: f64) -> f64
{
am + 5.0 + 5.0*(par.to_degrees() * 3600.0).log10()
}
/**
Computes the absolute magnitude of a star from its distance from earth
# Arguments
* `d`: The star's distance to earth *(parsecs)*
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_dist(d: f64, am: f64) -> f64
{
am + 5.0 - 5.0*d.log10()
}
/**
Computes the angle between a vector from a star to the
north celestial pole of the Earth and a vector from the
same star to the north pole of the ecliptic
# Returns
* `angle`: The desired angle *| in radians*
# Arguments
* `eclip_long`: The star's ecliptical longitude *| in radians*
* `eclip_lat`: The star's ecliptical latitude *| in radians*
* `oblq_eclip`: Obliquity of the ecliptic *| in radians*
**/
#[inline]
pub fn angl_between_north_celes_and_eclip_pole(eclip_long: f64,
eclip_lat: f64,
oblq_eclip: f64) -> f64
{
(eclip_long.cos() * oblq_eclip.tan()).atan2 (
eclip_lat.sin() * eclip_long.sin() * oblq_eclip.tan()
- eclip_lat.cos()
)
}
/**
Computes the equatorial coordinates of a star
at a different time from its motion in space
This function computes the equatorial coordinates
of a star at a different time by taking into account
its proper motion, distance and radial velocity.
# Returns
`(new_asc, new_dec)`
* `new_asc`: Right ascension at the different
time *| in radians*
* `new_dec`: Declination at the different
time *| in radians*
# Arguments
* `asc0`: Right ascension of the star initially *| in radians*
* `dec0`: Declination of the star initially *| in radians*
* `r`: Distance of the star (*parsecs*)
* `delta_r`: Radial velocity of the star (*parsecs/second*)
* `proper_motion_asc`: Proper motion of the star in right ascension
*| in radians*
* `proper_motion_dec`: Proper motion of the star in declination
*| in radians*
* `t`: Decimal years from the initial time; negative in the past
and positive in the future
**/
pub fn
|
(asc0: f64,
dec0: f64,
r: f64,
delta_r: f64,
proper_motion_asc: f64,
proper_motion_dec: f64,
t: f64) -> (f64, f64)
{
let x = r * dec0.cos() * asc0.cos();
let y = r * dec0.cos() * asc0.sin();
let z = r * dec0.sin();
let delta_asc = 3600.0 * proper_motion_asc.to_degrees()/13751.0;
let delta_dec = 3600.0 * proper_motion_dec.to_degrees()/206265.0;
let delta_x = (x / r)*delta_r - z*delta_dec*asc0.cos() - y*delta_asc;
let delta_y = (y / r)*delta_r - z*delta_dec*asc0.sin() + x*delta_asc;
let delta_z = (z / r)*delta_r + r*delta_dec*dec0.cos();
let x1 = x + t*delta_x;
let y1 = y + t*delta_y;
let z1 = z + t*delta_z;
let asc = y1.atan2(x1);
let dec = z1.atan2((x1*x1 + y1*y1).sqrt());
(asc, dec)
}
pub fn proper_motion_in_eq_coords(asc: f64,
dec: f64,
pmotion_asc: f64,
pmotion_dec: f64,
ecl_lat: f64,
oblq_eclip: f64) -> (f64, f64)
{
let ecl_lat_cos = ecl_lat.cos();
let pmotion_long = (
pmotion_dec * oblq_eclip.sin() * asc.cos()
+ pmotion_asc * dec.cos() * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
) / (ecl_lat_cos * ecl_lat_cos);
let pmotion_lat = (
pmotion_dec * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
- pmotion_asc * oblq_eclip.sin() * asc.cos() * dec.cos()
) / ecl_lat_cos;
(pmotion_long, pmotion_lat)
}
|
eq_coords_frm_motion
|
identifier_name
|
star.rs
|
/*
Copyright (c) 2015, 2016 Saurav Sachidanand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//! Stars
/**
Computes the combined magnitude of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn combined_mag(m1: f64, m2: f64) -> f64
{
    m2 - 2.5 * (brightness_ratio(m1, m2) + 1.0).log10()
}
/**
Computes the combined magnitude of two or more stars
# Arguments
* `m`: Array of magnitudes of stars
**/
pub fn combined_mag_of_many(m: &[f64]) -> f64
{
let mut sum = 0.0;
for i in m.iter() {
sum += 10_f64.powf(-0.4 * i);
}
-2.5 * sum.log10()
}
/**
Computes the brightness ratio of two stars
# Arguments
* `m1`: Magnitude of star 1
* `m2`: Magnitude of star 2
**/
#[inline]
pub fn brightness_ratio(m1: f64, m2: f64) -> f64
{
10.0_f64.powf(0.4 * (m2 - m1))
}
/**
Computes the difference in magnitude of two stars
# Arguments
* `br`: Brightness ratio of two stars
**/
#[inline]
pub fn mag_diff(br: f64) -> f64
|
/**
Computes the absolute magnitude of a star from its parallax
# Arguments
* `par`: Parallax of the star
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_parallax(par: f64, am: f64) -> f64
{
am + 5.0 + 5.0*(par.to_degrees() * 3600.0).log10()
}
/**
Computes the absolute magnitude of a star from its distance from earth
# Arguments
* `d`: The star's distance to earth *(parsecs)*
* `am`: Apparent magnitude of the star
**/
#[inline]
pub fn abs_mag_frm_dist(d: f64, am: f64) -> f64
{
am + 5.0 - 5.0*d.log10()
}
/**
Computes the angle between a vector from a star to the
north celestial pole of the Earth and a vector from the
same star to the north pole of the ecliptic
# Returns
* `angle`: The desired angle *| in radians*
# Arguments
* `eclip_long`: The star's ecliptical longitude *| in radians*
* `eclip_lat`: The star's ecliptical latitude *| in radians*
* `oblq_eclip`: Obliquity of the ecliptic *| in radians*
**/
#[inline]
pub fn angl_between_north_celes_and_eclip_pole(eclip_long: f64,
eclip_lat: f64,
oblq_eclip: f64) -> f64
{
(eclip_long.cos() * oblq_eclip.tan()).atan2 (
eclip_lat.sin() * eclip_long.sin() * oblq_eclip.tan()
- eclip_lat.cos()
)
}
/**
Computes the equatorial coordinates of a star
at a different time from its motion in space
This function computes the equatorial coordinates
of a star at a different time by taking into account
its proper motion, distance and radial velocity.
# Returns
`(new_asc, new_dec)`
* `new_asc`: Right ascension at the different
time *| in radians*
* `new_dec`: Declination at the different
time *| in radians*
# Arguments
* `asc0`: Right ascension of the star initially *| in radians*
* `dec0`: Declination of the star initially *| in radians*
* `r`: Distance of the star (*parsecs*)
* `delta_r`: Radial velocity of the star (*parsecs/second*)
* `proper_motion_asc`: Proper motion of the star in right ascension
*| in radians*
* `proper_motion_dec`: Proper motion of the star in declination
*| in radians*
* `t`: Decimal years from the initial time; negative in the past
and positive in the future
**/
pub fn eq_coords_frm_motion(asc0: f64,
dec0: f64,
r: f64,
delta_r: f64,
proper_motion_asc: f64,
proper_motion_dec: f64,
t: f64) -> (f64, f64)
{
let x = r * dec0.cos() * asc0.cos();
let y = r * dec0.cos() * asc0.sin();
let z = r * dec0.sin();
let delta_asc = 3600.0 * proper_motion_asc.to_degrees()/13751.0;
let delta_dec = 3600.0 * proper_motion_dec.to_degrees()/206265.0;
let delta_x = (x / r)*delta_r - z*delta_dec*asc0.cos() - y*delta_asc;
let delta_y = (y / r)*delta_r - z*delta_dec*asc0.sin() + x*delta_asc;
let delta_z = (z / r)*delta_r + r*delta_dec*dec0.cos();
let x1 = x + t*delta_x;
let y1 = y + t*delta_y;
let z1 = z + t*delta_z;
let asc = y1.atan2(x1);
let dec = z1.atan2((x1*x1 + y1*y1).sqrt());
(asc, dec)
}
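// Editorial call-shape sketch (illustrative only; every value below is an
// arbitrary placeholder in the units documented above, not data for any real
// star): propagate a star's equatorial coordinates 100 years forward from its
// space motion.
#[allow(dead_code)]
fn example_eq_coords_frm_motion() -> (f64, f64) {
    let asc0 = 1.7;       // initial right ascension (radians)
    let dec0 = -0.29;     // initial declination (radians)
    let r = 2.64;         // distance (parsecs)
    let delta_r = 0.0;    // radial velocity (parsecs/second)
    let pm_asc = -2.6e-9; // proper motion in right ascension (radians)
    let pm_dec = -5.9e-9; // proper motion in declination (radians)
    eq_coords_frm_motion(asc0, dec0, r, delta_r, pm_asc, pm_dec, 100.0)
}
/**
Computes the proper motion of a star in ecliptical coordinates
from its proper motion in equatorial coordinates
(Doc comment added editorially; the description is inferred from the
transformation formulas in the function body.)
# Returns
`(pmotion_long, pmotion_lat)`
* `pmotion_long`: Proper motion in ecliptical longitude *| in radians*
* `pmotion_lat`: Proper motion in ecliptical latitude *| in radians*
# Arguments
* `asc`: Right ascension of the star *| in radians*
* `dec`: Declination of the star *| in radians*
* `pmotion_asc`: Proper motion of the star in right ascension *| in radians*
* `pmotion_dec`: Proper motion of the star in declination *| in radians*
* `ecl_lat`: Ecliptical latitude of the star *| in radians*
* `oblq_eclip`: Obliquity of the ecliptic *| in radians*
**/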
pub fn proper_motion_in_eq_coords(asc: f64,
dec: f64,
pmotion_asc: f64,
pmotion_dec: f64,
ecl_lat: f64,
oblq_eclip: f64) -> (f64, f64)
{
let ecl_lat_cos = ecl_lat.cos();
let pmotion_long = (
pmotion_dec * oblq_eclip.sin() * asc.cos()
+ pmotion_asc * dec.cos() * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
) / (ecl_lat_cos * ecl_lat_cos);
let pmotion_lat = (
pmotion_dec * (
oblq_eclip.cos() * dec.cos()
+ oblq_eclip.sin() * dec.sin() * asc.sin()
)
- pmotion_asc * oblq_eclip.sin() * asc.cos() * dec.cos()
) / ecl_lat_cos;
(pmotion_long, pmotion_lat)
}
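// Editorial call-shape sketch (illustrative only; the inputs are arbitrary
// placeholders, with the obliquity set to roughly its J2000 value): convert a
// star's equatorial proper motion into ecliptical proper motion.
#[allow(dead_code)]
fn example_proper_motion_conversion() -> (f64, f64) {
    proper_motion_in_eq_coords(
        2.53,    // right ascension (radians)
        0.156,   // declination (radians)
        1.0e-7,  // proper motion in right ascension (radians)
        -4.0e-8, // proper motion in declination (radians)
        0.18,    // ecliptical latitude of the star (radians)
        0.4091,  // obliquity of the ecliptic (radians), ~23.44 degrees
    )
}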
|
{
2.5 * br.log10()
}
|
identifier_body
|
struct-style-enum.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// debugger:set print union on
// debugger:rbreak zzz
// debugger:run
// debugger:finish
// debugger:print case1
// check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// debugger:print case2
// check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// debugger:print case3
// check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
// debugger:print univariant
// check:$4 = {a = -1}
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
// the size of the discriminant value is machine dependent, this has to be taken into account when
// the datatype layout should be predictable, as in this case.
enum Regular {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn
|
() {
// In order to avoid endianness trouble, all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()}
|
main
|
identifier_name
|
struct-style-enum.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// debugger:set print union on
// debugger:rbreak zzz
// debugger:run
// debugger:finish
// debugger:print case1
// check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}}
// debugger:print case2
// check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}}
// debugger:print case3
// check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}}
|
// debugger:print univariant
// check:$4 = {a = -1}
#![allow(unused_variable)]
#![feature(struct_variant)]
// The first element is to ensure proper alignment, irrespective of the machine's word size. Since
// the size of the discriminant value is machine dependent, this has to be taken into account when
// the datatype layout should be predictable, as in this case.
enum Regular {
Case1 { a: u64, b: u16, c: u16, d: u16, e: u16},
Case2 { a: u64, b: u32, c: u32},
Case3 { a: u64, b: u64 }
}
enum Univariant {
TheOnlyCase { a: i64 }
}
fn main() {
// In order to avoid endianness trouble, all of the following test values consist of a single
// repeated byte. This way each interpretation of the union should look the same, no matter if
// this is a big or little endian machine.
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452
// 0b01111100011111000111110001111100 = 2088533116
// 0b0111110001111100 = 31868
// 0b01111100 = 124
let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 };
// 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441
// 0b00010001000100010001000100010001 = 286331153
// 0b0001000100010001 = 4369
// 0b00010001 = 17
let case2 = Case2 { a: 0, b: 286331153, c: 286331153 };
// 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897
// 0b01011001010110010101100101011001 = 1499027801
// 0b0101100101011001 = 22873
// 0b01011001 = 89
let case3 = Case3 { a: 0, b: 6438275382588823897 };
let univariant = TheOnlyCase { a: -1 };
zzz();
}
fn zzz() {()}
|
random_line_split
|
|
mod.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
|
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Types used in the public api
#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/mod.rs.in"));
|
random_line_split
|
|
unused-attr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_attributes)]
#![allow(dead_code, unused_imports)]
#![feature(core, custom_attribute)]
#![foo] //~ ERROR unused attribute
#[foo] //~ ERROR unused attribute
extern crate core;
#[foo] //~ ERROR unused attribute
use std::collections;
#[foo] //~ ERROR unused attribute
extern "C" {
#[foo] //~ ERROR unused attribute
fn foo();
}
#[foo] //~ ERROR unused attribute
mod foo {
#[foo] //~ ERROR unused attribute
pub enum Foo {
#[foo] //~ ERROR unused attribute
Bar,
}
}
#[foo] //~ ERROR unused attribute
fn bar(f: foo::Foo) {
match f {
#[foo] //~ ERROR unused attribute
foo::Foo::Bar => {}
}
}
#[foo] //~ ERROR unused attribute
struct Foo {
#[foo] //~ ERROR unused attribute
a: isize
}
#[foo] //~ ERROR unused attribute
|
fn blah2(&self) {}
}
fn main() {}
|
trait Baz {
#[foo] //~ ERROR unused attribute
fn blah(&self);
#[foo] //~ ERROR unused attribute
|
random_line_split
|
unused-attr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_attributes)]
#![allow(dead_code, unused_imports)]
#![feature(core, custom_attribute)]
#![foo] //~ ERROR unused attribute
#[foo] //~ ERROR unused attribute
extern crate core;
#[foo] //~ ERROR unused attribute
use std::collections;
#[foo] //~ ERROR unused attribute
extern "C" {
#[foo] //~ ERROR unused attribute
fn foo();
}
#[foo] //~ ERROR unused attribute
mod foo {
#[foo] //~ ERROR unused attribute
pub enum Foo {
#[foo] //~ ERROR unused attribute
Bar,
}
}
#[foo] //~ ERROR unused attribute
fn bar(f: foo::Foo) {
match f {
#[foo] //~ ERROR unused attribute
foo::Foo::Bar => {}
}
}
#[foo] //~ ERROR unused attribute
struct Foo {
#[foo] //~ ERROR unused attribute
a: isize
}
#[foo] //~ ERROR unused attribute
trait Baz {
#[foo] //~ ERROR unused attribute
fn blah(&self);
#[foo] //~ ERROR unused attribute
fn blah2(&self) {}
}
fn
|
() {}
|
main
|
identifier_name
|
unused-attr.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(unused_attributes)]
#![allow(dead_code, unused_imports)]
#![feature(core, custom_attribute)]
#![foo] //~ ERROR unused attribute
#[foo] //~ ERROR unused attribute
extern crate core;
#[foo] //~ ERROR unused attribute
use std::collections;
#[foo] //~ ERROR unused attribute
extern "C" {
#[foo] //~ ERROR unused attribute
fn foo();
}
#[foo] //~ ERROR unused attribute
mod foo {
#[foo] //~ ERROR unused attribute
pub enum Foo {
#[foo] //~ ERROR unused attribute
Bar,
}
}
#[foo] //~ ERROR unused attribute
fn bar(f: foo::Foo) {
match f {
#[foo] //~ ERROR unused attribute
foo::Foo::Bar =>
|
}
}
#[foo] //~ ERROR unused attribute
struct Foo {
#[foo] //~ ERROR unused attribute
a: isize
}
#[foo] //~ ERROR unused attribute
trait Baz {
#[foo] //~ ERROR unused attribute
fn blah(&self);
#[foo] //~ ERROR unused attribute
fn blah2(&self) {}
}
fn main() {}
|
{}
|
conditional_block
|