file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs
|
//! Simulates a chip8 cpu and provides a thread-safe interface to control execution and state.
mod threaded;
#[cfg(test)]
mod tests;
use std::sync::{Arc, RwLock};
use types::*;
use Chip8;
use config::Config;
use instruction::{self, Dest, Operation, Src};
pub use self::threaded::SimulatorTask;
use state::RandomBytes;
/// An object that can simulate a chip8 machine.
pub trait Simulate {
/// Fetch the current instruction, advance the PC, and execute the instruction.
fn step(&mut self) -> Chip8Result<()>;
/// Execute multiple instructions.
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()>;
/// Advance the sound and delay timers.
fn timer_tick(&mut self) -> Chip8Result<()>;
/// Load bytes into ram.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()>;
/// Load a program into ram and the configured base address.
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()>;
/// Load a value from a Src.
fn load(&mut self, src: Src) -> Chip8Result<usize>;
/// Store a value into a Dest.
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()>;
/// Set the keyboard state.
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()>;
/// Read the keyboard state.
fn keyboard(&self) -> Chip8Result<Keyboard>;
/// Read the Vram state.
fn vram(&self) -> Chip8Result<Vram>;
/// Read the buzzer state.
fn buzzer(&self) -> Chip8Result<Buzzer>;
/// Read the audio state.
fn audio(&self) -> Chip8Result<Audio>;
}
/// Manages the state of a chip8 cpu.
#[derive(Debug)]
pub struct Simulator {
core: Chip8,
instruction_set: instruction::Set,
}
impl Simulate for Simulator {
/// Loads bytes into RAM starting at the given address.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()> {
self.core.load_bytes(bytes, addr)
}
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()> {
let address = self.core.config.addr_program;
self.load_bytes(bytes, address as Address)
}
/// Decrements the delay and sound timer.
fn timer_tick(&mut self) -> Chip8Result<()> {
if self.core.dt > 0 {
self.core.dt -= 1;
}
if self.core.st > 0 {
self.core.st -= 1;
}
Ok(())
}
fn load(&mut self, src: Src) -> Chip8Result<usize> {
self.core.load(src)
}
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()> {
self.core.store(dest, value)
}
fn step(&mut self) -> Chip8Result<()> {
let instruction = try!(self.decode_at_addr(self.core.pc()));
self.core.advance_pc();
try!(instruction.execute(&mut self.core));
Ok(())
}
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()> {
for _ in 0..number_of_steps {
try!(self.step())
}
Ok(())
}
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()> {
self.core.set_keyboard(keys)
}
fn keyboard(&self) -> Chip8Result<Keyboard> {
self.core.keyboard()
}
fn vram(&self) -> Chip8Result<Vram> {
self.core.vram()
}
fn buzzer(&self) -> Chip8Result<Buzzer>
|
fn audio(&self) -> Chip8Result<Audio> {
self.core.audio()
}
}
impl Simulator {
/// Returns a new Simulator.
pub fn new(config: &Config, rand_iterator: Option<RandomBytes>) -> Chip8Result<Simulator> {
let core: Chip8 = Chip8::new(config, rand_iterator);
let iset = instruction::Set::new(config);
let mut s = Simulator {
core: core,
instruction_set: iset,
};
try!(s.load_bytes(config.font_small, config.addr_font as Address));
try!(s.core.store(Dest::PC, config.addr_program));
Ok(s)
}
/// Returns a default simulator, using the default configuration.
pub fn default() -> Chip8Result<Simulator> {
Self::new(&Config::default(), None)
}
/// Decodes an instruction. TODO: Move to ::instruction
pub fn decode_instruction(&self, codeword: Codeword) -> Chip8Result<Operation> {
self.instruction_set
.decode(codeword)
.ok_or_else(|| Chip8Error::InvalidInstruction(codeword))
}
/// Decodes the instruction stored in RAM at the given address.
pub fn decode_at_addr(&self, addr: Address) -> Chip8Result<Operation> {
let a = addr as usize;
let hi = (self.core.ram[a] as Codeword) << 8;
let lo = self.core.ram[a + 1] as Codeword;
let codeword = hi | lo;
self.decode_instruction(codeword)
}
/// Get the 16-bit word stored at the location pointed to by the program counter.
pub fn current_codeword(&self) -> Codeword {
let pc = self.core.pc as usize;
let hi = self.core.ram[pc] as Codeword;
let lo = self.core.ram[pc + 1] as Codeword;
(hi << 8) | lo
}
/// Returns a copy of the lock for the keyboard.
fn keyboard_lock(&mut self) -> Chip8Result<Arc<RwLock<Keyboard>>> {
Ok(self.core.keyboard_lock())
}
/// Returns a copy of the lock for the vram.
fn vram_lock(&mut self) -> Chip8Result<Arc<RwLock<Vram>>> {
Ok(self.core.vram_lock())
}
/// Returns a copy of the lock for the buzzer.
fn buzzer_lock(&mut self) -> Chip8Result<Arc<RwLock<Buzzer>>> {
Ok(self.core.buzzer_lock())
}
/// Returns a copy of the lock for the audio.
fn audio_lock(&mut self) -> Chip8Result<Arc<RwLock<Audio>>> {
Ok(self.core.audio_lock())
}
}
|
{
self.core.buzzer()
}
|
identifier_body
|
mod.rs
|
//! Simulates a chip8 cpu and provides a thread-safe interface to control execution and state.
mod threaded;
#[cfg(test)]
mod tests;
use std::sync::{Arc, RwLock};
use types::*;
use Chip8;
use config::Config;
use instruction::{self, Dest, Operation, Src};
pub use self::threaded::SimulatorTask;
use state::RandomBytes;
/// An object that can simulate a chip8 machine.
pub trait Simulate {
/// Fetch the current instruction, advance the PC, and execute the instruction.
fn step(&mut self) -> Chip8Result<()>;
/// Execute multiple instructions.
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()>;
/// Advance the sound and delay timers.
fn timer_tick(&mut self) -> Chip8Result<()>;
/// Load bytes into ram.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()>;
/// Load a program into ram and the configured base address.
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()>;
/// Load a value from a Src.
fn load(&mut self, src: Src) -> Chip8Result<usize>;
/// Store a value into a Dest.
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()>;
/// Set the keyboard state.
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()>;
/// Read the keyboard state.
fn keyboard(&self) -> Chip8Result<Keyboard>;
/// Read the Vram state.
fn vram(&self) -> Chip8Result<Vram>;
/// Read the buzzer state.
fn buzzer(&self) -> Chip8Result<Buzzer>;
/// Read the audio state.
fn audio(&self) -> Chip8Result<Audio>;
}
/// Manages the state of a chip8 cpu.
#[derive(Debug)]
pub struct Simulator {
core: Chip8,
instruction_set: instruction::Set,
}
impl Simulate for Simulator {
/// Loads bytes into RAM starting at the given address.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()> {
self.core.load_bytes(bytes, addr)
}
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()> {
let address = self.core.config.addr_program;
self.load_bytes(bytes, address as Address)
}
/// Decrements the delay and sound timer.
fn timer_tick(&mut self) -> Chip8Result<()> {
if self.core.dt > 0 {
self.core.dt -= 1;
}
if self.core.st > 0
|
Ok(())
}
fn load(&mut self, src: Src) -> Chip8Result<usize> {
self.core.load(src)
}
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()> {
self.core.store(dest, value)
}
fn step(&mut self) -> Chip8Result<()> {
let instruction = try!(self.decode_at_addr(self.core.pc()));
self.core.advance_pc();
try!(instruction.execute(&mut self.core));
Ok(())
}
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()> {
for _ in 0..number_of_steps {
try!(self.step())
}
Ok(())
}
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()> {
self.core.set_keyboard(keys)
}
fn keyboard(&self) -> Chip8Result<Keyboard> {
self.core.keyboard()
}
fn vram(&self) -> Chip8Result<Vram> {
self.core.vram()
}
fn buzzer(&self) -> Chip8Result<Buzzer> {
self.core.buzzer()
}
fn audio(&self) -> Chip8Result<Audio> {
self.core.audio()
}
}
impl Simulator {
/// Returns a new Simulator.
pub fn new(config: &Config, rand_iterator: Option<RandomBytes>) -> Chip8Result<Simulator> {
let core: Chip8 = Chip8::new(config, rand_iterator);
let iset = instruction::Set::new(config);
let mut s = Simulator {
core: core,
instruction_set: iset,
};
try!(s.load_bytes(config.font_small, config.addr_font as Address));
try!(s.core.store(Dest::PC, config.addr_program));
Ok(s)
}
/// Returns a default simulator, using the default configuration.
pub fn default() -> Chip8Result<Simulator> {
Self::new(&Config::default(), None)
}
/// Decodes an instruction. TODO: Move to ::instruction
pub fn decode_instruction(&self, codeword: Codeword) -> Chip8Result<Operation> {
self.instruction_set
.decode(codeword)
.ok_or_else(|| Chip8Error::InvalidInstruction(codeword))
}
/// Decodes the instruction stored in RAM at the given address.
pub fn decode_at_addr(&self, addr: Address) -> Chip8Result<Operation> {
let a = addr as usize;
let hi = (self.core.ram[a] as Codeword) << 8;
let lo = self.core.ram[a + 1] as Codeword;
let codeword = hi | lo;
self.decode_instruction(codeword)
}
/// Get the 16-bit word stored at the location pointed to by the program counter.
pub fn current_codeword(&self) -> Codeword {
let pc = self.core.pc as usize;
let hi = self.core.ram[pc] as Codeword;
let lo = self.core.ram[pc + 1] as Codeword;
(hi << 8) | lo
}
/// Returns a copy of the lock for the keyboard.
fn keyboard_lock(&mut self) -> Chip8Result<Arc<RwLock<Keyboard>>> {
Ok(self.core.keyboard_lock())
}
/// Returns a copy of the lock for the vram.
fn vram_lock(&mut self) -> Chip8Result<Arc<RwLock<Vram>>> {
Ok(self.core.vram_lock())
}
/// Returns a copy of the lock for the buzzer.
fn buzzer_lock(&mut self) -> Chip8Result<Arc<RwLock<Buzzer>>> {
Ok(self.core.buzzer_lock())
}
/// Returns a copy of the lock for the audio.
fn audio_lock(&mut self) -> Chip8Result<Arc<RwLock<Audio>>> {
Ok(self.core.audio_lock())
}
}
|
{
self.core.st -= 1;
}
|
conditional_block
|
mod.rs
|
//! Simulates a chip8 cpu and provides a thread-safe interface to control execution and state.
mod threaded;
#[cfg(test)]
mod tests;
use std::sync::{Arc, RwLock};
use types::*;
use Chip8;
use config::Config;
use instruction::{self, Dest, Operation, Src};
pub use self::threaded::SimulatorTask;
use state::RandomBytes;
/// An object that can simulate a chip8 machine.
pub trait Simulate {
/// Fetch the current instruction, advance the PC, and execute the instruction.
fn step(&mut self) -> Chip8Result<()>;
/// Execute multiple instructions.
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()>;
/// Advance the sound and delay timers.
fn timer_tick(&mut self) -> Chip8Result<()>;
/// Load bytes into ram.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()>;
/// Load a program into ram and the configured base address.
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()>;
/// Load a value from a Src.
fn load(&mut self, src: Src) -> Chip8Result<usize>;
/// Store a value into a Dest.
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()>;
/// Set the keyboard state.
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()>;
/// Read the keyboard state.
fn keyboard(&self) -> Chip8Result<Keyboard>;
/// Read the Vram state.
fn vram(&self) -> Chip8Result<Vram>;
/// Read the buzzer state.
fn buzzer(&self) -> Chip8Result<Buzzer>;
/// Read the audio state.
fn audio(&self) -> Chip8Result<Audio>;
}
/// Manages the state of a chip8 cpu.
#[derive(Debug)]
pub struct Simulator {
core: Chip8,
instruction_set: instruction::Set,
}
impl Simulate for Simulator {
/// Loads bytes into RAM starting at the given address.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()> {
self.core.load_bytes(bytes, addr)
}
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()> {
let address = self.core.config.addr_program;
self.load_bytes(bytes, address as Address)
}
/// Decrements the delay and sound timer.
fn timer_tick(&mut self) -> Chip8Result<()> {
if self.core.dt > 0 {
self.core.dt -= 1;
}
if self.core.st > 0 {
self.core.st -= 1;
}
Ok(())
}
fn load(&mut self, src: Src) -> Chip8Result<usize> {
self.core.load(src)
}
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()> {
self.core.store(dest, value)
}
fn step(&mut self) -> Chip8Result<()> {
let instruction = try!(self.decode_at_addr(self.core.pc()));
self.core.advance_pc();
try!(instruction.execute(&mut self.core));
Ok(())
}
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()> {
for _ in 0..number_of_steps {
try!(self.step())
}
Ok(())
}
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()> {
self.core.set_keyboard(keys)
}
fn keyboard(&self) -> Chip8Result<Keyboard> {
self.core.keyboard()
}
fn vram(&self) -> Chip8Result<Vram> {
self.core.vram()
}
fn buzzer(&self) -> Chip8Result<Buzzer> {
self.core.buzzer()
}
fn audio(&self) -> Chip8Result<Audio> {
self.core.audio()
}
}
impl Simulator {
/// Returns a new Simulator.
pub fn new(config: &Config, rand_iterator: Option<RandomBytes>) -> Chip8Result<Simulator> {
let core: Chip8 = Chip8::new(config, rand_iterator);
let iset = instruction::Set::new(config);
let mut s = Simulator {
core: core,
instruction_set: iset,
};
try!(s.load_bytes(config.font_small, config.addr_font as Address));
try!(s.core.store(Dest::PC, config.addr_program));
Ok(s)
}
/// Returns a default simulator, using the default configuration.
pub fn default() -> Chip8Result<Simulator> {
Self::new(&Config::default(), None)
}
/// Decodes an instruction. TODO: Move to ::instruction
pub fn decode_instruction(&self, codeword: Codeword) -> Chip8Result<Operation> {
self.instruction_set
.decode(codeword)
.ok_or_else(|| Chip8Error::InvalidInstruction(codeword))
}
/// Decodes the instruction stored in RAM at the given address.
pub fn decode_at_addr(&self, addr: Address) -> Chip8Result<Operation> {
let a = addr as usize;
let hi = (self.core.ram[a] as Codeword) << 8;
let lo = self.core.ram[a + 1] as Codeword;
let codeword = hi | lo;
self.decode_instruction(codeword)
}
/// Get the 16-bit word stored at the location pointed to by the program counter.
pub fn current_codeword(&self) -> Codeword {
let pc = self.core.pc as usize;
let hi = self.core.ram[pc] as Codeword;
let lo = self.core.ram[pc + 1] as Codeword;
(hi << 8) | lo
}
/// Returns a copy of the lock for the keyboard.
fn keyboard_lock(&mut self) -> Chip8Result<Arc<RwLock<Keyboard>>> {
Ok(self.core.keyboard_lock())
}
/// Returns a copy of the lock for the vram.
fn vram_lock(&mut self) -> Chip8Result<Arc<RwLock<Vram>>> {
Ok(self.core.vram_lock())
}
/// Returns a copy of the lock for the buzzer.
fn
|
(&mut self) -> Chip8Result<Arc<RwLock<Buzzer>>> {
Ok(self.core.buzzer_lock())
}
/// Returns a copy of the lock for the audio.
fn audio_lock(&mut self) -> Chip8Result<Arc<RwLock<Audio>>> {
Ok(self.core.audio_lock())
}
}
|
buzzer_lock
|
identifier_name
|
mod.rs
|
//! Simulates a chip8 cpu and provides a thread-safe interface to control execution and state.
mod threaded;
#[cfg(test)]
mod tests;
use std::sync::{Arc, RwLock};
use types::*;
use Chip8;
use config::Config;
use instruction::{self, Dest, Operation, Src};
pub use self::threaded::SimulatorTask;
use state::RandomBytes;
/// An object that can simulate a chip8 machine.
pub trait Simulate {
/// Fetch the current instruction, advance the PC, and execute the instruction.
fn step(&mut self) -> Chip8Result<()>;
/// Execute multiple instructions.
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()>;
/// Advance the sound and delay timers.
fn timer_tick(&mut self) -> Chip8Result<()>;
/// Load bytes into ram.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()>;
/// Load a program into ram and the configured base address.
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()>;
/// Load a value from a Src.
fn load(&mut self, src: Src) -> Chip8Result<usize>;
/// Store a value into a Dest.
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()>;
/// Set the keyboard state.
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()>;
/// Read the keyboard state.
fn keyboard(&self) -> Chip8Result<Keyboard>;
/// Read the Vram state.
fn vram(&self) -> Chip8Result<Vram>;
/// Read the buzzer state.
fn buzzer(&self) -> Chip8Result<Buzzer>;
/// Read the audio state.
fn audio(&self) -> Chip8Result<Audio>;
}
/// Manages the state of a chip8 cpu.
#[derive(Debug)]
pub struct Simulator {
core: Chip8,
instruction_set: instruction::Set,
}
impl Simulate for Simulator {
/// Loads bytes into RAM starting at the given address.
fn load_bytes(&mut self, bytes: &[u8], addr: Address) -> Chip8Result<()> {
self.core.load_bytes(bytes, addr)
}
fn load_program(&mut self, bytes: &[u8]) -> Chip8Result<()> {
let address = self.core.config.addr_program;
self.load_bytes(bytes, address as Address)
}
/// Decrements the delay and sound timer.
fn timer_tick(&mut self) -> Chip8Result<()> {
if self.core.dt > 0 {
self.core.dt -= 1;
}
if self.core.st > 0 {
self.core.st -= 1;
}
Ok(())
}
fn load(&mut self, src: Src) -> Chip8Result<usize> {
self.core.load(src)
}
fn store(&mut self, dest: Dest, value: usize) -> Chip8Result<()> {
self.core.store(dest, value)
}
fn step(&mut self) -> Chip8Result<()> {
let instruction = try!(self.decode_at_addr(self.core.pc()));
self.core.advance_pc();
try!(instruction.execute(&mut self.core));
Ok(())
}
fn step_n(&mut self, number_of_steps: usize) -> Chip8Result<()> {
for _ in 0..number_of_steps {
try!(self.step())
}
Ok(())
}
fn set_keyboard(&mut self, keys: &Keyboard) -> Chip8Result<()> {
self.core.set_keyboard(keys)
}
fn keyboard(&self) -> Chip8Result<Keyboard> {
self.core.keyboard()
}
fn vram(&self) -> Chip8Result<Vram> {
self.core.vram()
}
fn buzzer(&self) -> Chip8Result<Buzzer> {
self.core.buzzer()
}
fn audio(&self) -> Chip8Result<Audio> {
self.core.audio()
}
}
impl Simulator {
|
pub fn new(config: &Config, rand_iterator: Option<RandomBytes>) -> Chip8Result<Simulator> {
let core: Chip8 = Chip8::new(config, rand_iterator);
let iset = instruction::Set::new(config);
let mut s = Simulator {
core: core,
instruction_set: iset,
};
try!(s.load_bytes(config.font_small, config.addr_font as Address));
try!(s.core.store(Dest::PC, config.addr_program));
Ok(s)
}
/// Returns a default simulator, using the default configuration.
pub fn default() -> Chip8Result<Simulator> {
Self::new(&Config::default(), None)
}
/// Decodes an instruction. TODO: Move to ::instruction
pub fn decode_instruction(&self, codeword: Codeword) -> Chip8Result<Operation> {
self.instruction_set
.decode(codeword)
.ok_or_else(|| Chip8Error::InvalidInstruction(codeword))
}
/// Decodes the instruction stored in RAM at the given address.
pub fn decode_at_addr(&self, addr: Address) -> Chip8Result<Operation> {
let a = addr as usize;
let hi = (self.core.ram[a] as Codeword) << 8;
let lo = self.core.ram[a + 1] as Codeword;
let codeword = hi | lo;
self.decode_instruction(codeword)
}
/// Get the 16-bit word stored at the location pointed to by the program counter.
pub fn current_codeword(&self) -> Codeword {
let pc = self.core.pc as usize;
let hi = self.core.ram[pc] as Codeword;
let lo = self.core.ram[pc + 1] as Codeword;
(hi << 8) | lo
}
/// Returns a copy of the lock for the keyboard.
fn keyboard_lock(&mut self) -> Chip8Result<Arc<RwLock<Keyboard>>> {
Ok(self.core.keyboard_lock())
}
/// Returns a copy of the lock for the vram.
fn vram_lock(&mut self) -> Chip8Result<Arc<RwLock<Vram>>> {
Ok(self.core.vram_lock())
}
/// Returns a copy of the lock for the buzzer.
fn buzzer_lock(&mut self) -> Chip8Result<Arc<RwLock<Buzzer>>> {
Ok(self.core.buzzer_lock())
}
/// Returns a copy of the lock for the audio.
fn audio_lock(&mut self) -> Chip8Result<Arc<RwLock<Audio>>> {
Ok(self.core.audio_lock())
}
}
|
/// Returns a new Simulator.
|
random_line_split
|
decoder.rs
|
//! Parse and decode COSE signatures.
use cbor::CborType;
use cbor::decoder::decode;
use {CoseError, SignatureAlgorithm};
use util::get_sig_struct_bytes;
use std::collections::BTreeMap;
pub const COSE_SIGN_TAG: u64 = 98;
/// The result of `decode_signature` holding a decoded COSE signature.
#[derive(Debug)]
pub struct CoseSignature {
pub signature_type: SignatureAlgorithm,
pub signature: Vec<u8>,
pub signer_cert: Vec<u8>,
pub certs: Vec<Vec<u8>>,
pub to_verify: Vec<u8>,
}
pub const COSE_TYPE_ES256: i64 = -7;
pub const COSE_TYPE_ES384: i64 = -35;
pub const COSE_TYPE_ES512: i64 = -36;
pub const COSE_TYPE_PS256: i64 = -37;
pub const COSE_HEADER_ALG: u64 = 1;
pub const COSE_HEADER_KID: u64 = 4;
macro_rules! unpack {
($to:tt, $var:ident) => (
match *$var {
CborType::$to(ref cbor_object) => {
cbor_object
}
_ => return Err(CoseError::UnexpectedType),
};
)
}
fn get_map_value(
map: &BTreeMap<CborType, CborType>,
key: &CborType,
) -> Result<CborType, CoseError> {
match map.get(key) {
Some(x) => Ok(x.clone()),
_ => Err(CoseError::MissingHeader),
}
}
/// Ensure that the referenced `CborType` is an empty map.
fn ensure_empty_map(map: &CborType) -> Result<(), CoseError> {
let unpacked = unpack!(Map, map);
if!unpacked.is_empty() {
return Err(CoseError::MalformedInput);
}
Ok(())
}
// This syntax is a little unintuitive. Taken together, the two previous definitions essentially
// mean:
//
// COSE_Sign = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// payload : bstr / nil,
// signatures : [+ COSE_Signature]
// ]
//
// (COSE_Sign is an array. The first element is an empty or serialized map (in our case, it is
// never expected to be empty). The second element is a map (it is expected to be empty. The third
// element is a bstr or nil (it is expected to be nil). The fourth element is an array of
// COSE_Signature.)
//
// COSE_Signature = [
// Headers,
// signature : bstr
// ]
//
// but again, unpacking this:
//
// COSE_Signature = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// signature : bstr
// ]
fn decode_signature_struct(
cose_signature: &CborType,
payload: &[u8],
protected_body_head: &CborType,
) -> Result<CoseSignature, CoseError> {
let cose_signature = unpack!(Array, cose_signature);
if cose_signature.len()!= 3 {
return Err(CoseError::MalformedInput);
}
let protected_signature_header_serialized = &cose_signature[0];
let protected_signature_header_bytes = unpack!(Bytes, protected_signature_header_serialized);
// Parse the protected signature header.
let protected_signature_header = &match decode(protected_signature_header_bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let protected_signature_header = unpack!(Map, protected_signature_header);
if protected_signature_header.len()!= 2 {
return Err(CoseError::MalformedInput);
}
let signature_algorithm = get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_ALG),
)?;
let signature_algorithm = match signature_algorithm {
CborType::SignedInteger(val) => {
match val {
COSE_TYPE_ES256 => SignatureAlgorithm::ES256,
COSE_TYPE_ES384 => SignatureAlgorithm::ES384,
COSE_TYPE_ES512 => SignatureAlgorithm::ES512,
COSE_TYPE_PS256 => SignatureAlgorithm::PS256,
_ => return Err(CoseError::UnexpectedHeaderValue),
}
}
_ => return Err(CoseError::UnexpectedType),
};
let ee_cert = &get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_KID),
)?;
let ee_cert = unpack!(Bytes, ee_cert).clone();
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_signature[1])?;
// Build signature structure to verify.
let signature_bytes = &cose_signature[2];
let signature_bytes = unpack!(Bytes, signature_bytes).clone();
let sig_structure_bytes = get_sig_struct_bytes(
protected_body_head.clone(),
protected_signature_header_serialized.clone(),
payload,
);
// Read intermediate certificates from protected_body_head.
// Any tampering of the protected header during transport will be detected
// because it is input to the signature verification.
// Note that a protected header has to be present and hold a kid with an
// empty list of intermediate certificates.
let protected_body_head_bytes = unpack!(Bytes, protected_body_head);
let protected_body_head_map = &match decode(protected_body_head_bytes) {
Ok(value) => value,
Err(_) => return Err(CoseError::DecodingFailure),
};
let protected_body_head_map = unpack!(Map, protected_body_head_map);
if protected_body_head_map.len()!= 1 {
return Err(CoseError::MalformedInput);
}
let intermediate_certs_array =
&get_map_value(protected_body_head_map, &CborType::Integer(COSE_HEADER_KID))?;
let intermediate_certs = unpack!(Array, intermediate_certs_array);
let mut certs: Vec<Vec<u8>> = Vec::new();
for cert in intermediate_certs {
let cert = unpack!(Bytes, cert);
certs.push(cert.clone());
}
Ok(CoseSignature {
signature_type: signature_algorithm,
signature: signature_bytes,
signer_cert: ee_cert,
certs: certs,
to_verify: sig_structure_bytes,
})
|
}
/// Decode COSE signature bytes and return a vector of `CoseSignature`.
///
///```rust,ignore
/// COSE_Sign = [
/// Headers,
/// payload : bstr / nil,
/// signatures : [+ COSE_Signature]
/// ]
///
/// Headers = (
/// protected : empty_or_serialized_map,
/// unprotected : header_map
/// )
///```
pub fn decode_signature(bytes: &[u8], payload: &[u8]) -> Result<Vec<CoseSignature>, CoseError> {
// This has to be a COSE_Sign object, which is a tagged array.
let tagged_cose_sign = match decode(bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let cose_sign_array = match tagged_cose_sign {
CborType::Tag(tag, cose_sign) => {
if tag!= COSE_SIGN_TAG {
return Err(CoseError::UnexpectedTag);
}
match *cose_sign {
CborType::Array(values) => values,
_ => return Err(CoseError::UnexpectedType),
}
}
_ => return Err(CoseError::UnexpectedType),
};
if cose_sign_array.len()!= 4 {
return Err(CoseError::MalformedInput);
}
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_sign_array[1])?;
// The payload is expected to be Null (i.e. this is a detached signature).
match cose_sign_array[2] {
CborType::Null => {}
_ => return Err(CoseError::UnexpectedType),
};
let signatures = &cose_sign_array[3];
let signatures = unpack!(Array, signatures);
// Decode COSE_Signatures.
// There has to be at least one signature to make this a valid COSE signature.
if signatures.len() < 1 {
return Err(CoseError::MalformedInput);
}
let mut result = Vec::new();
for cose_signature in signatures {
// cose_sign_array[0] holds the protected body header.
let signature = decode_signature_struct(cose_signature, payload, &cose_sign_array[0])?;
result.push(signature);
}
Ok(result)
}
|
random_line_split
|
|
decoder.rs
|
//! Parse and decode COSE signatures.
use cbor::CborType;
use cbor::decoder::decode;
use {CoseError, SignatureAlgorithm};
use util::get_sig_struct_bytes;
use std::collections::BTreeMap;
pub const COSE_SIGN_TAG: u64 = 98;
/// The result of `decode_signature` holding a decoded COSE signature.
#[derive(Debug)]
pub struct CoseSignature {
pub signature_type: SignatureAlgorithm,
pub signature: Vec<u8>,
pub signer_cert: Vec<u8>,
pub certs: Vec<Vec<u8>>,
pub to_verify: Vec<u8>,
}
pub const COSE_TYPE_ES256: i64 = -7;
pub const COSE_TYPE_ES384: i64 = -35;
pub const COSE_TYPE_ES512: i64 = -36;
pub const COSE_TYPE_PS256: i64 = -37;
pub const COSE_HEADER_ALG: u64 = 1;
pub const COSE_HEADER_KID: u64 = 4;
macro_rules! unpack {
($to:tt, $var:ident) => (
match *$var {
CborType::$to(ref cbor_object) => {
cbor_object
}
_ => return Err(CoseError::UnexpectedType),
};
)
}
fn get_map_value(
map: &BTreeMap<CborType, CborType>,
key: &CborType,
) -> Result<CborType, CoseError> {
match map.get(key) {
Some(x) => Ok(x.clone()),
_ => Err(CoseError::MissingHeader),
}
}
/// Ensure that the referenced `CborType` is an empty map.
fn
|
(map: &CborType) -> Result<(), CoseError> {
let unpacked = unpack!(Map, map);
if!unpacked.is_empty() {
return Err(CoseError::MalformedInput);
}
Ok(())
}
// This syntax is a little unintuitive. Taken together, the two previous definitions essentially
// mean:
//
// COSE_Sign = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// payload : bstr / nil,
// signatures : [+ COSE_Signature]
// ]
//
// (COSE_Sign is an array. The first element is an empty or serialized map (in our case, it is
// never expected to be empty). The second element is a map (it is expected to be empty. The third
// element is a bstr or nil (it is expected to be nil). The fourth element is an array of
// COSE_Signature.)
//
// COSE_Signature = [
// Headers,
// signature : bstr
// ]
//
// but again, unpacking this:
//
// COSE_Signature = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// signature : bstr
// ]
fn decode_signature_struct(
cose_signature: &CborType,
payload: &[u8],
protected_body_head: &CborType,
) -> Result<CoseSignature, CoseError> {
let cose_signature = unpack!(Array, cose_signature);
if cose_signature.len()!= 3 {
return Err(CoseError::MalformedInput);
}
let protected_signature_header_serialized = &cose_signature[0];
let protected_signature_header_bytes = unpack!(Bytes, protected_signature_header_serialized);
// Parse the protected signature header.
let protected_signature_header = &match decode(protected_signature_header_bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let protected_signature_header = unpack!(Map, protected_signature_header);
if protected_signature_header.len()!= 2 {
return Err(CoseError::MalformedInput);
}
let signature_algorithm = get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_ALG),
)?;
let signature_algorithm = match signature_algorithm {
CborType::SignedInteger(val) => {
match val {
COSE_TYPE_ES256 => SignatureAlgorithm::ES256,
COSE_TYPE_ES384 => SignatureAlgorithm::ES384,
COSE_TYPE_ES512 => SignatureAlgorithm::ES512,
COSE_TYPE_PS256 => SignatureAlgorithm::PS256,
_ => return Err(CoseError::UnexpectedHeaderValue),
}
}
_ => return Err(CoseError::UnexpectedType),
};
let ee_cert = &get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_KID),
)?;
let ee_cert = unpack!(Bytes, ee_cert).clone();
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_signature[1])?;
// Build signature structure to verify.
let signature_bytes = &cose_signature[2];
let signature_bytes = unpack!(Bytes, signature_bytes).clone();
let sig_structure_bytes = get_sig_struct_bytes(
protected_body_head.clone(),
protected_signature_header_serialized.clone(),
payload,
);
// Read intermediate certificates from protected_body_head.
// Any tampering of the protected header during transport will be detected
// because it is input to the signature verification.
// Note that a protected header has to be present and hold a kid with an
// empty list of intermediate certificates.
let protected_body_head_bytes = unpack!(Bytes, protected_body_head);
let protected_body_head_map = &match decode(protected_body_head_bytes) {
Ok(value) => value,
Err(_) => return Err(CoseError::DecodingFailure),
};
let protected_body_head_map = unpack!(Map, protected_body_head_map);
if protected_body_head_map.len()!= 1 {
return Err(CoseError::MalformedInput);
}
let intermediate_certs_array =
&get_map_value(protected_body_head_map, &CborType::Integer(COSE_HEADER_KID))?;
let intermediate_certs = unpack!(Array, intermediate_certs_array);
let mut certs: Vec<Vec<u8>> = Vec::new();
for cert in intermediate_certs {
let cert = unpack!(Bytes, cert);
certs.push(cert.clone());
}
Ok(CoseSignature {
signature_type: signature_algorithm,
signature: signature_bytes,
signer_cert: ee_cert,
certs: certs,
to_verify: sig_structure_bytes,
})
}
/// Decode COSE signature bytes and return a vector of `CoseSignature`.
///
///```rust,ignore
/// COSE_Sign = [
/// Headers,
/// payload : bstr / nil,
/// signatures : [+ COSE_Signature]
/// ]
///
/// Headers = (
/// protected : empty_or_serialized_map,
/// unprotected : header_map
/// )
///```
pub fn decode_signature(bytes: &[u8], payload: &[u8]) -> Result<Vec<CoseSignature>, CoseError> {
// This has to be a COSE_Sign object, which is a tagged array.
let tagged_cose_sign = match decode(bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let cose_sign_array = match tagged_cose_sign {
CborType::Tag(tag, cose_sign) => {
if tag!= COSE_SIGN_TAG {
return Err(CoseError::UnexpectedTag);
}
match *cose_sign {
CborType::Array(values) => values,
_ => return Err(CoseError::UnexpectedType),
}
}
_ => return Err(CoseError::UnexpectedType),
};
if cose_sign_array.len()!= 4 {
return Err(CoseError::MalformedInput);
}
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_sign_array[1])?;
// The payload is expected to be Null (i.e. this is a detached signature).
match cose_sign_array[2] {
CborType::Null => {}
_ => return Err(CoseError::UnexpectedType),
};
let signatures = &cose_sign_array[3];
let signatures = unpack!(Array, signatures);
// Decode COSE_Signatures.
// There has to be at least one signature to make this a valid COSE signature.
if signatures.len() < 1 {
return Err(CoseError::MalformedInput);
}
let mut result = Vec::new();
for cose_signature in signatures {
// cose_sign_array[0] holds the protected body header.
let signature = decode_signature_struct(cose_signature, payload, &cose_sign_array[0])?;
result.push(signature);
}
Ok(result)
}
|
ensure_empty_map
|
identifier_name
|
decoder.rs
|
//! Parse and decode COSE signatures.
use cbor::CborType;
use cbor::decoder::decode;
use {CoseError, SignatureAlgorithm};
use util::get_sig_struct_bytes;
use std::collections::BTreeMap;
pub const COSE_SIGN_TAG: u64 = 98;
/// The result of `decode_signature` holding a decoded COSE signature.
#[derive(Debug)]
pub struct CoseSignature {
pub signature_type: SignatureAlgorithm,
pub signature: Vec<u8>,
pub signer_cert: Vec<u8>,
pub certs: Vec<Vec<u8>>,
pub to_verify: Vec<u8>,
}
pub const COSE_TYPE_ES256: i64 = -7;
pub const COSE_TYPE_ES384: i64 = -35;
pub const COSE_TYPE_ES512: i64 = -36;
pub const COSE_TYPE_PS256: i64 = -37;
pub const COSE_HEADER_ALG: u64 = 1;
pub const COSE_HEADER_KID: u64 = 4;
macro_rules! unpack {
($to:tt, $var:ident) => (
match *$var {
CborType::$to(ref cbor_object) => {
cbor_object
}
_ => return Err(CoseError::UnexpectedType),
};
)
}
fn get_map_value(
map: &BTreeMap<CborType, CborType>,
key: &CborType,
) -> Result<CborType, CoseError>
|
/// Ensure that the referenced `CborType` is an empty map.
fn ensure_empty_map(map: &CborType) -> Result<(), CoseError> {
let unpacked = unpack!(Map, map);
if!unpacked.is_empty() {
return Err(CoseError::MalformedInput);
}
Ok(())
}
// This syntax is a little unintuitive. Taken together, the two previous definitions essentially
// mean:
//
// COSE_Sign = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// payload : bstr / nil,
// signatures : [+ COSE_Signature]
// ]
//
// (COSE_Sign is an array. The first element is an empty or serialized map (in our case, it is
// never expected to be empty). The second element is a map (it is expected to be empty. The third
// element is a bstr or nil (it is expected to be nil). The fourth element is an array of
// COSE_Signature.)
//
// COSE_Signature = [
// Headers,
// signature : bstr
// ]
//
// but again, unpacking this:
//
// COSE_Signature = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// signature : bstr
// ]
fn decode_signature_struct(
cose_signature: &CborType,
payload: &[u8],
protected_body_head: &CborType,
) -> Result<CoseSignature, CoseError> {
let cose_signature = unpack!(Array, cose_signature);
if cose_signature.len()!= 3 {
return Err(CoseError::MalformedInput);
}
let protected_signature_header_serialized = &cose_signature[0];
let protected_signature_header_bytes = unpack!(Bytes, protected_signature_header_serialized);
// Parse the protected signature header.
let protected_signature_header = &match decode(protected_signature_header_bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let protected_signature_header = unpack!(Map, protected_signature_header);
if protected_signature_header.len()!= 2 {
return Err(CoseError::MalformedInput);
}
let signature_algorithm = get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_ALG),
)?;
let signature_algorithm = match signature_algorithm {
CborType::SignedInteger(val) => {
match val {
COSE_TYPE_ES256 => SignatureAlgorithm::ES256,
COSE_TYPE_ES384 => SignatureAlgorithm::ES384,
COSE_TYPE_ES512 => SignatureAlgorithm::ES512,
COSE_TYPE_PS256 => SignatureAlgorithm::PS256,
_ => return Err(CoseError::UnexpectedHeaderValue),
}
}
_ => return Err(CoseError::UnexpectedType),
};
let ee_cert = &get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_KID),
)?;
let ee_cert = unpack!(Bytes, ee_cert).clone();
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_signature[1])?;
// Build signature structure to verify.
let signature_bytes = &cose_signature[2];
let signature_bytes = unpack!(Bytes, signature_bytes).clone();
let sig_structure_bytes = get_sig_struct_bytes(
protected_body_head.clone(),
protected_signature_header_serialized.clone(),
payload,
);
// Read intermediate certificates from protected_body_head.
// Any tampering of the protected header during transport will be detected
// because it is input to the signature verification.
// Note that a protected header has to be present and hold a kid with an
// empty list of intermediate certificates.
let protected_body_head_bytes = unpack!(Bytes, protected_body_head);
let protected_body_head_map = &match decode(protected_body_head_bytes) {
Ok(value) => value,
Err(_) => return Err(CoseError::DecodingFailure),
};
let protected_body_head_map = unpack!(Map, protected_body_head_map);
if protected_body_head_map.len()!= 1 {
return Err(CoseError::MalformedInput);
}
let intermediate_certs_array =
&get_map_value(protected_body_head_map, &CborType::Integer(COSE_HEADER_KID))?;
let intermediate_certs = unpack!(Array, intermediate_certs_array);
let mut certs: Vec<Vec<u8>> = Vec::new();
for cert in intermediate_certs {
let cert = unpack!(Bytes, cert);
certs.push(cert.clone());
}
Ok(CoseSignature {
signature_type: signature_algorithm,
signature: signature_bytes,
signer_cert: ee_cert,
certs: certs,
to_verify: sig_structure_bytes,
})
}
/// Decode COSE signature bytes and return a vector of `CoseSignature`.
///
///```rust,ignore
/// COSE_Sign = [
/// Headers,
/// payload : bstr / nil,
/// signatures : [+ COSE_Signature]
/// ]
///
/// Headers = (
/// protected : empty_or_serialized_map,
/// unprotected : header_map
/// )
///```
pub fn decode_signature(bytes: &[u8], payload: &[u8]) -> Result<Vec<CoseSignature>, CoseError> {
// This has to be a COSE_Sign object, which is a tagged array.
let tagged_cose_sign = match decode(bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let cose_sign_array = match tagged_cose_sign {
CborType::Tag(tag, cose_sign) => {
if tag!= COSE_SIGN_TAG {
return Err(CoseError::UnexpectedTag);
}
match *cose_sign {
CborType::Array(values) => values,
_ => return Err(CoseError::UnexpectedType),
}
}
_ => return Err(CoseError::UnexpectedType),
};
if cose_sign_array.len()!= 4 {
return Err(CoseError::MalformedInput);
}
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_sign_array[1])?;
// The payload is expected to be Null (i.e. this is a detached signature).
match cose_sign_array[2] {
CborType::Null => {}
_ => return Err(CoseError::UnexpectedType),
};
let signatures = &cose_sign_array[3];
let signatures = unpack!(Array, signatures);
// Decode COSE_Signatures.
// There has to be at least one signature to make this a valid COSE signature.
if signatures.len() < 1 {
return Err(CoseError::MalformedInput);
}
let mut result = Vec::new();
for cose_signature in signatures {
// cose_sign_array[0] holds the protected body header.
let signature = decode_signature_struct(cose_signature, payload, &cose_sign_array[0])?;
result.push(signature);
}
Ok(result)
}
|
{
match map.get(key) {
Some(x) => Ok(x.clone()),
_ => Err(CoseError::MissingHeader),
}
}
|
identifier_body
|
decoder.rs
|
//! Parse and decode COSE signatures.
use cbor::CborType;
use cbor::decoder::decode;
use {CoseError, SignatureAlgorithm};
use util::get_sig_struct_bytes;
use std::collections::BTreeMap;
pub const COSE_SIGN_TAG: u64 = 98;
/// The result of `decode_signature` holding a decoded COSE signature.
#[derive(Debug)]
pub struct CoseSignature {
pub signature_type: SignatureAlgorithm,
pub signature: Vec<u8>,
pub signer_cert: Vec<u8>,
pub certs: Vec<Vec<u8>>,
pub to_verify: Vec<u8>,
}
pub const COSE_TYPE_ES256: i64 = -7;
pub const COSE_TYPE_ES384: i64 = -35;
pub const COSE_TYPE_ES512: i64 = -36;
pub const COSE_TYPE_PS256: i64 = -37;
pub const COSE_HEADER_ALG: u64 = 1;
pub const COSE_HEADER_KID: u64 = 4;
macro_rules! unpack {
($to:tt, $var:ident) => (
match *$var {
CborType::$to(ref cbor_object) => {
cbor_object
}
_ => return Err(CoseError::UnexpectedType),
};
)
}
fn get_map_value(
map: &BTreeMap<CborType, CborType>,
key: &CborType,
) -> Result<CborType, CoseError> {
match map.get(key) {
Some(x) => Ok(x.clone()),
_ => Err(CoseError::MissingHeader),
}
}
/// Ensure that the referenced `CborType` is an empty map.
fn ensure_empty_map(map: &CborType) -> Result<(), CoseError> {
let unpacked = unpack!(Map, map);
if!unpacked.is_empty() {
return Err(CoseError::MalformedInput);
}
Ok(())
}
// This syntax is a little unintuitive. Taken together, the two previous definitions essentially
// mean:
//
// COSE_Sign = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// payload : bstr / nil,
// signatures : [+ COSE_Signature]
// ]
//
// (COSE_Sign is an array. The first element is an empty or serialized map (in our case, it is
// never expected to be empty). The second element is a map (it is expected to be empty. The third
// element is a bstr or nil (it is expected to be nil). The fourth element is an array of
// COSE_Signature.)
//
// COSE_Signature = [
// Headers,
// signature : bstr
// ]
//
// but again, unpacking this:
//
// COSE_Signature = [
// protected : empty_or_serialized_map,
// unprotected : header_map
// signature : bstr
// ]
fn decode_signature_struct(
cose_signature: &CborType,
payload: &[u8],
protected_body_head: &CborType,
) -> Result<CoseSignature, CoseError> {
let cose_signature = unpack!(Array, cose_signature);
if cose_signature.len()!= 3 {
return Err(CoseError::MalformedInput);
}
let protected_signature_header_serialized = &cose_signature[0];
let protected_signature_header_bytes = unpack!(Bytes, protected_signature_header_serialized);
// Parse the protected signature header.
let protected_signature_header = &match decode(protected_signature_header_bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let protected_signature_header = unpack!(Map, protected_signature_header);
if protected_signature_header.len()!= 2 {
return Err(CoseError::MalformedInput);
}
let signature_algorithm = get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_ALG),
)?;
let signature_algorithm = match signature_algorithm {
CborType::SignedInteger(val) => {
match val {
COSE_TYPE_ES256 => SignatureAlgorithm::ES256,
COSE_TYPE_ES384 => SignatureAlgorithm::ES384,
COSE_TYPE_ES512 => SignatureAlgorithm::ES512,
COSE_TYPE_PS256 => SignatureAlgorithm::PS256,
_ => return Err(CoseError::UnexpectedHeaderValue),
}
}
_ => return Err(CoseError::UnexpectedType),
};
let ee_cert = &get_map_value(
protected_signature_header,
&CborType::Integer(COSE_HEADER_KID),
)?;
let ee_cert = unpack!(Bytes, ee_cert).clone();
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_signature[1])?;
// Build signature structure to verify.
let signature_bytes = &cose_signature[2];
let signature_bytes = unpack!(Bytes, signature_bytes).clone();
let sig_structure_bytes = get_sig_struct_bytes(
protected_body_head.clone(),
protected_signature_header_serialized.clone(),
payload,
);
// Read intermediate certificates from protected_body_head.
// Any tampering of the protected header during transport will be detected
// because it is input to the signature verification.
// Note that a protected header has to be present and hold a kid with an
// empty list of intermediate certificates.
let protected_body_head_bytes = unpack!(Bytes, protected_body_head);
let protected_body_head_map = &match decode(protected_body_head_bytes) {
Ok(value) => value,
Err(_) => return Err(CoseError::DecodingFailure),
};
let protected_body_head_map = unpack!(Map, protected_body_head_map);
if protected_body_head_map.len()!= 1 {
return Err(CoseError::MalformedInput);
}
let intermediate_certs_array =
&get_map_value(protected_body_head_map, &CborType::Integer(COSE_HEADER_KID))?;
let intermediate_certs = unpack!(Array, intermediate_certs_array);
let mut certs: Vec<Vec<u8>> = Vec::new();
for cert in intermediate_certs {
let cert = unpack!(Bytes, cert);
certs.push(cert.clone());
}
Ok(CoseSignature {
signature_type: signature_algorithm,
signature: signature_bytes,
signer_cert: ee_cert,
certs: certs,
to_verify: sig_structure_bytes,
})
}
/// Decode COSE signature bytes and return a vector of `CoseSignature`.
///
///```rust,ignore
/// COSE_Sign = [
/// Headers,
/// payload : bstr / nil,
/// signatures : [+ COSE_Signature]
/// ]
///
/// Headers = (
/// protected : empty_or_serialized_map,
/// unprotected : header_map
/// )
///```
pub fn decode_signature(bytes: &[u8], payload: &[u8]) -> Result<Vec<CoseSignature>, CoseError> {
// This has to be a COSE_Sign object, which is a tagged array.
let tagged_cose_sign = match decode(bytes) {
Err(_) => return Err(CoseError::DecodingFailure),
Ok(value) => value,
};
let cose_sign_array = match tagged_cose_sign {
CborType::Tag(tag, cose_sign) => {
if tag!= COSE_SIGN_TAG {
return Err(CoseError::UnexpectedTag);
}
match *cose_sign {
CborType::Array(values) => values,
_ => return Err(CoseError::UnexpectedType),
}
}
_ => return Err(CoseError::UnexpectedType),
};
if cose_sign_array.len()!= 4 {
return Err(CoseError::MalformedInput);
}
// The unprotected header section is expected to be an empty map.
ensure_empty_map(&cose_sign_array[1])?;
// The payload is expected to be Null (i.e. this is a detached signature).
match cose_sign_array[2] {
CborType::Null => {}
_ => return Err(CoseError::UnexpectedType),
};
let signatures = &cose_sign_array[3];
let signatures = unpack!(Array, signatures);
// Decode COSE_Signatures.
// There has to be at least one signature to make this a valid COSE signature.
if signatures.len() < 1
|
let mut result = Vec::new();
for cose_signature in signatures {
// cose_sign_array[0] holds the protected body header.
let signature = decode_signature_struct(cose_signature, payload, &cose_sign_array[0])?;
result.push(signature);
}
Ok(result)
}
|
{
return Err(CoseError::MalformedInput);
}
|
conditional_block
|
demo.rs
|
extern crate nest;
use nest::*;
use std::f32::consts::PI;
use std::time::Instant;
fn
|
() {
let mut app = Window::new("Demo", 640, 480).expect("error: failed to open window");
let start = Instant::now();
// Load the petal texture.
let petal_texture = app.load_image("examples/petal.png").unwrap();
// Create an image rectangle from the petal texture with a width of 0.4 and proportional height.
let petal = image_w(petal_texture, 0.4).translate([0.3, 0.0]);
// Create flower from 6 petals rotated around the center.
let flower = (0usize..6)
.flat_map(|i| petal.rotate(i as f32 / 6.0 * 2.0 * PI))
.collect::<Vec<_>>();
loop {
// Handle events.
for event in app.poll_events() {
match event {
// Close if they close the window or hit escape.
Event::Closed | Event::KeyboardInput(KeyState::Pressed, Some(Key::Escape)) => {
return
}
// Print "Space!" if they hit space.
Event::KeyboardInput(KeyState::Pressed, Some(Key::Space)) => println!("Space!"),
_ => {}
}
}
// Draw the flower rotating at 1 rad/sec.
app.draw(flower.rotate(start.elapsed().to_secs()));
}
}
|
main
|
identifier_name
|
demo.rs
|
extern crate nest;
use nest::*;
use std::f32::consts::PI;
use std::time::Instant;
fn main() {
let mut app = Window::new("Demo", 640, 480).expect("error: failed to open window");
let start = Instant::now();
// Load the petal texture.
let petal_texture = app.load_image("examples/petal.png").unwrap();
// Create an image rectangle from the petal texture with a width of 0.4 and proportional height.
let petal = image_w(petal_texture, 0.4).translate([0.3, 0.0]);
// Create flower from 6 petals rotated around the center.
let flower = (0usize..6)
.flat_map(|i| petal.rotate(i as f32 / 6.0 * 2.0 * PI))
.collect::<Vec<_>>();
loop {
// Handle events.
for event in app.poll_events() {
match event {
// Close if they close the window or hit escape.
Event::Closed | Event::KeyboardInput(KeyState::Pressed, Some(Key::Escape)) =>
|
// Print "Space!" if they hit space.
Event::KeyboardInput(KeyState::Pressed, Some(Key::Space)) => println!("Space!"),
_ => {}
}
}
// Draw the flower rotating at 1 rad/sec.
app.draw(flower.rotate(start.elapsed().to_secs()));
}
}
|
{
return
}
|
conditional_block
|
demo.rs
|
extern crate nest;
use nest::*;
use std::f32::consts::PI;
use std::time::Instant;
fn main()
|
Event::Closed | Event::KeyboardInput(KeyState::Pressed, Some(Key::Escape)) => {
return
}
// Print "Space!" if they hit space.
Event::KeyboardInput(KeyState::Pressed, Some(Key::Space)) => println!("Space!"),
_ => {}
}
}
// Draw the flower rotating at 1 rad/sec.
app.draw(flower.rotate(start.elapsed().to_secs()));
}
}
|
{
let mut app = Window::new("Demo", 640, 480).expect("error: failed to open window");
let start = Instant::now();
// Load the petal texture.
let petal_texture = app.load_image("examples/petal.png").unwrap();
// Create an image rectangle from the petal texture with a width of 0.4 and proportional height.
let petal = image_w(petal_texture, 0.4).translate([0.3, 0.0]);
// Create flower from 6 petals rotated around the center.
let flower = (0usize..6)
.flat_map(|i| petal.rotate(i as f32 / 6.0 * 2.0 * PI))
.collect::<Vec<_>>();
loop {
// Handle events.
for event in app.poll_events() {
match event {
// Close if they close the window or hit escape.
|
identifier_body
|
demo.rs
|
extern crate nest;
use nest::*;
use std::f32::consts::PI;
use std::time::Instant;
fn main() {
let mut app = Window::new("Demo", 640, 480).expect("error: failed to open window");
let start = Instant::now();
// Load the petal texture.
let petal_texture = app.load_image("examples/petal.png").unwrap();
// Create an image rectangle from the petal texture with a width of 0.4 and proportional height.
let petal = image_w(petal_texture, 0.4).translate([0.3, 0.0]);
// Create flower from 6 petals rotated around the center.
let flower = (0usize..6)
.flat_map(|i| petal.rotate(i as f32 / 6.0 * 2.0 * PI))
.collect::<Vec<_>>();
loop {
// Handle events.
for event in app.poll_events() {
|
// Print "Space!" if they hit space.
Event::KeyboardInput(KeyState::Pressed, Some(Key::Space)) => println!("Space!"),
_ => {}
}
}
// Draw the flower rotating at 1 rad/sec.
app.draw(flower.rotate(start.elapsed().to_secs()));
}
}
|
match event {
// Close if they close the window or hit escape.
Event::Closed | Event::KeyboardInput(KeyState::Pressed, Some(Key::Escape)) => {
return
}
|
random_line_split
|
delete.rs
|
use bit_set::BitSet;
use directory::WritePtr;
use std::io::Write;
use std::io;
use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
let mut shift = 0u8;
for doc in 0..max_doc {
if delete_bitset.contains(doc) {
byte |= 1 << shift;
}
if shift == 7 {
writer.write_all(&[byte])?;
shift = 0;
byte = 0;
} else
|
}
if max_doc % 8 > 0 {
writer.write_all(&[byte])?;
}
writer.flush()
}
/// Set of deleted `DocId`s.
#[derive(Clone)]
pub struct DeleteBitSet {
data: ReadOnlySource,
len: usize,
}
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
DeleteBitSet {
data: data,
len: num_deleted,
}
}
/// Returns an empty delete bit set.
pub fn empty() -> DeleteBitSet {
DeleteBitSet {
data: ReadOnlySource::empty(),
len: 0,
}
}
/// Returns true iff the segment has some deleted documents.
pub fn has_deletes(&self) -> bool {
self.len() > 0
}
/// Returns true iff the document is deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift)!= 0
}
}
}
impl HasLen for DeleteBitSet {
fn len(&self) -> usize {
self.len
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use bit_set::BitSet;
use directory::*;
use super::*;
fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, &mut writer).unwrap();
}
{
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_capacity(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset);
}
{
let mut bitset = BitSet::with_capacity(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset);
}
}
}
|
{
shift += 1;
}
|
conditional_block
|
delete.rs
|
use bit_set::BitSet;
use directory::WritePtr;
use std::io::Write;
use std::io;
use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
let mut shift = 0u8;
for doc in 0..max_doc {
if delete_bitset.contains(doc) {
byte |= 1 << shift;
}
if shift == 7 {
writer.write_all(&[byte])?;
shift = 0;
byte = 0;
} else {
shift += 1;
}
}
if max_doc % 8 > 0 {
writer.write_all(&[byte])?;
}
writer.flush()
}
/// Set of deleted `DocId`s.
#[derive(Clone)]
pub struct DeleteBitSet {
data: ReadOnlySource,
len: usize,
}
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet
|
/// Returns an empty delete bit set.
pub fn empty() -> DeleteBitSet {
DeleteBitSet {
data: ReadOnlySource::empty(),
len: 0,
}
}
/// Returns true iff the segment has some deleted documents.
pub fn has_deletes(&self) -> bool {
self.len() > 0
}
/// Returns true iff the document is deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift)!= 0
}
}
}
impl HasLen for DeleteBitSet {
fn len(&self) -> usize {
self.len
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use bit_set::BitSet;
use directory::*;
use super::*;
fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, &mut writer).unwrap();
}
{
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_capacity(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset);
}
{
let mut bitset = BitSet::with_capacity(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset);
}
}
}
|
{
let num_deleted: usize = data.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
DeleteBitSet {
data: data,
len: num_deleted,
}
}
|
identifier_body
|
delete.rs
|
use bit_set::BitSet;
use directory::WritePtr;
use std::io::Write;
use std::io;
use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
let mut shift = 0u8;
for doc in 0..max_doc {
if delete_bitset.contains(doc) {
byte |= 1 << shift;
}
if shift == 7 {
writer.write_all(&[byte])?;
shift = 0;
byte = 0;
} else {
shift += 1;
}
}
if max_doc % 8 > 0 {
writer.write_all(&[byte])?;
}
writer.flush()
}
/// Set of deleted `DocId`s.
#[derive(Clone)]
pub struct DeleteBitSet {
data: ReadOnlySource,
len: usize,
}
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
|
data: data,
len: num_deleted,
}
}
/// Returns an empty delete bit set.
pub fn empty() -> DeleteBitSet {
DeleteBitSet {
data: ReadOnlySource::empty(),
len: 0,
}
}
/// Returns true iff the segment has some deleted documents.
pub fn has_deletes(&self) -> bool {
self.len() > 0
}
/// Returns true iff the document is deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift)!= 0
}
}
}
impl HasLen for DeleteBitSet {
fn len(&self) -> usize {
self.len
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use bit_set::BitSet;
use directory::*;
use super::*;
fn test_delete_bitset_helper(bitset: &BitSet) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, &mut writer).unwrap();
}
{
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_capacity(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset);
}
{
let mut bitset = BitSet::with_capacity(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset);
}
}
}
|
DeleteBitSet {
|
random_line_split
|
delete.rs
|
use bit_set::BitSet;
use directory::WritePtr;
use std::io::Write;
use std::io;
use directory::ReadOnlySource;
use DocId;
use common::HasLen;
/// Write a delete `BitSet`
///
/// where `delete_bitset` is the set of deleted `DocId`.
pub fn write_delete_bitset(delete_bitset: &BitSet, writer: &mut WritePtr) -> io::Result<()> {
let max_doc = delete_bitset.capacity();
let mut byte = 0u8;
let mut shift = 0u8;
for doc in 0..max_doc {
if delete_bitset.contains(doc) {
byte |= 1 << shift;
}
if shift == 7 {
writer.write_all(&[byte])?;
shift = 0;
byte = 0;
} else {
shift += 1;
}
}
if max_doc % 8 > 0 {
writer.write_all(&[byte])?;
}
writer.flush()
}
/// Set of deleted `DocId`s.
#[derive(Clone)]
pub struct DeleteBitSet {
data: ReadOnlySource,
len: usize,
}
impl DeleteBitSet {
/// Opens a delete bitset given its data source.
pub fn open(data: ReadOnlySource) -> DeleteBitSet {
let num_deleted: usize = data.as_slice()
.iter()
.map(|b| b.count_ones() as usize)
.sum();
DeleteBitSet {
data: data,
len: num_deleted,
}
}
/// Returns an empty delete bit set.
pub fn empty() -> DeleteBitSet {
DeleteBitSet {
data: ReadOnlySource::empty(),
len: 0,
}
}
/// Returns true iff the segment has some deleted documents.
pub fn has_deletes(&self) -> bool {
self.len() > 0
}
/// Returns true iff the document is deleted.
#[inline]
pub fn is_deleted(&self, doc: DocId) -> bool {
if self.len == 0 {
false
} else {
let byte_offset = doc / 8u32;
let b: u8 = (*self.data)[byte_offset as usize];
let shift = (doc & 7u32) as u8;
b & (1u8 << shift)!= 0
}
}
}
impl HasLen for DeleteBitSet {
fn len(&self) -> usize {
self.len
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use bit_set::BitSet;
use directory::*;
use super::*;
fn
|
(bitset: &BitSet) {
let test_path = PathBuf::from("test");
let mut directory = RAMDirectory::create();
{
let mut writer = directory.open_write(&*test_path).unwrap();
write_delete_bitset(bitset, &mut writer).unwrap();
}
{
let source = directory.open_read(&test_path).unwrap();
let delete_bitset = DeleteBitSet::open(source);
let n = bitset.capacity();
for doc in 0..n {
assert_eq!(bitset.contains(doc), delete_bitset.is_deleted(doc as DocId));
}
assert_eq!(delete_bitset.len(), bitset.len());
}
}
#[test]
fn test_delete_bitset() {
{
let mut bitset = BitSet::with_capacity(10);
bitset.insert(1);
bitset.insert(9);
test_delete_bitset_helper(&bitset);
}
{
let mut bitset = BitSet::with_capacity(8);
bitset.insert(1);
bitset.insert(2);
bitset.insert(3);
bitset.insert(5);
bitset.insert(7);
test_delete_bitset_helper(&bitset);
}
}
}
|
test_delete_bitset_helper
|
identifier_name
|
check_static_recursion.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
use driver::session::Session;
use middle::resolve;
use middle::def::DefStatic;
use syntax::ast::{Crate, Expr, ExprPath, Item, ItemStatic, NodeId};
use syntax::{ast_util, ast_map};
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a resolve::DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, i: &Item) {
check_item(self, i);
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &Crate,
def_map: &resolve::DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
match it.node {
ItemStatic(_, _, ref ex) => {
check_item_recursion(v.sess, v.ast_map, v.def_map, it);
visit::walk_expr(v, &**ex)
},
_ => visit::walk_item(v, it)
}
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_it: &'a Item,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a resolve::DefMap,
idstack: Vec<NodeId>
}
// Make sure a const item doesn't recursively refer to itself
// FIXME: Should use the dependency graph when it's available (#1356)
pub fn check_item_recursion<'a>(sess: &'a Session,
ast_map: &'a ast_map::Map,
def_map: &'a resolve::DefMap,
it: &'a Item) {
let mut visitor = CheckItemRecursionVisitor {
root_it: it,
sess: sess,
ast_map: ast_map,
def_map: def_map,
idstack: Vec::new()
};
visitor.visit_item(it);
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn
|
(&mut self, it: &Item) {
if self.idstack.iter().any(|x| x == &(it.id)) {
self.sess.span_err(self.root_it.span, "recursive constant");
return;
}
self.idstack.push(it.id);
visit::walk_item(self, it);
self.idstack.pop();
}
fn visit_expr(&mut self, e: &Expr) {
match e.node {
ExprPath(..) => {
match self.def_map.borrow().find(&e.id) {
Some(&DefStatic(def_id, _)) if
ast_util::is_local(def_id) => {
self.visit_item(&*self.ast_map.expect_item(def_id.node));
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
|
visit_item
|
identifier_name
|
check_static_recursion.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This compiler pass detects static items that refer to themselves
// recursively.
|
use syntax::ast::{Crate, Expr, ExprPath, Item, ItemStatic, NodeId};
use syntax::{ast_util, ast_map};
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'ast: 'a> {
sess: &'a Session,
def_map: &'a resolve::DefMap,
ast_map: &'a ast_map::Map<'ast>
}
impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
fn visit_item(&mut self, i: &Item) {
check_item(self, i);
}
}
pub fn check_crate<'ast>(sess: &Session,
krate: &Crate,
def_map: &resolve::DefMap,
ast_map: &ast_map::Map<'ast>) {
let mut visitor = CheckCrateVisitor {
sess: sess,
def_map: def_map,
ast_map: ast_map
};
visit::walk_crate(&mut visitor, krate);
sess.abort_if_errors();
}
fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
match it.node {
ItemStatic(_, _, ref ex) => {
check_item_recursion(v.sess, v.ast_map, v.def_map, it);
visit::walk_expr(v, &**ex)
},
_ => visit::walk_item(v, it)
}
}
struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
root_it: &'a Item,
sess: &'a Session,
ast_map: &'a ast_map::Map<'ast>,
def_map: &'a resolve::DefMap,
idstack: Vec<NodeId>
}
// Make sure a const item doesn't recursively refer to itself
// FIXME: Should use the dependency graph when it's available (#1356)
pub fn check_item_recursion<'a>(sess: &'a Session,
ast_map: &'a ast_map::Map,
def_map: &'a resolve::DefMap,
it: &'a Item) {
let mut visitor = CheckItemRecursionVisitor {
root_it: it,
sess: sess,
ast_map: ast_map,
def_map: def_map,
idstack: Vec::new()
};
visitor.visit_item(it);
}
impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
fn visit_item(&mut self, it: &Item) {
if self.idstack.iter().any(|x| x == &(it.id)) {
self.sess.span_err(self.root_it.span, "recursive constant");
return;
}
self.idstack.push(it.id);
visit::walk_item(self, it);
self.idstack.pop();
}
fn visit_expr(&mut self, e: &Expr) {
match e.node {
ExprPath(..) => {
match self.def_map.borrow().find(&e.id) {
Some(&DefStatic(def_id, _)) if
ast_util::is_local(def_id) => {
self.visit_item(&*self.ast_map.expect_item(def_id.node));
}
_ => ()
}
},
_ => ()
}
visit::walk_expr(self, e);
}
}
|
use driver::session::Session;
use middle::resolve;
use middle::def::DefStatic;
|
random_line_split
|
htmlappletelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLAppletElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLAppletElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLAppletElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLAppletElement {
htmlelement: HTMLElement
}
impl HTMLAppletElementDerived for EventTarget {
fn is_htmlappletelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLAppletElementTypeId)) => true,
_ => false
}
}
}
impl HTMLAppletElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLAppletElement {
HTMLAppletElement {
htmlelement: HTMLElement::new_inherited(HTMLAppletElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLAppletElement> {
let element = HTMLAppletElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLAppletElementBinding::Wrap)
}
}
impl HTMLAppletElement {
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&mut self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Alt(&self) -> DOMString {
~""
}
pub fn SetAlt(&self, _alt: DOMString) -> ErrorResult {
Ok(())
}
pub fn Archive(&self) -> DOMString {
~""
}
pub fn SetArchive(&self, _archive: DOMString) -> ErrorResult {
Ok(())
}
pub fn Code(&self) -> DOMString {
~""
}
pub fn SetCode(&self, _code: DOMString) -> ErrorResult {
Ok(())
}
pub fn CodeBase(&self) -> DOMString {
~""
}
pub fn SetCodeBase(&self, _code_base: DOMString) -> ErrorResult {
Ok(())
}
pub fn Height(&self) -> DOMString {
~""
}
pub fn SetHeight(&self, _height: DOMString) -> ErrorResult {
Ok(())
}
pub fn Hspace(&self) -> u32 {
0
}
pub fn SetHspace(&mut self, _hspace: u32) -> ErrorResult {
Ok(())
}
pub fn Name(&self) -> DOMString {
~""
}
pub fn
|
(&mut self, _name: DOMString) -> ErrorResult {
Ok(())
}
pub fn Object(&self) -> DOMString {
~""
}
pub fn SetObject(&mut self, _object: DOMString) -> ErrorResult {
Ok(())
}
pub fn Vspace(&self) -> u32 {
0
}
pub fn SetVspace(&mut self, _vspace: u32) -> ErrorResult {
Ok(())
}
pub fn Width(&self) -> DOMString {
~""
}
pub fn SetWidth(&mut self, _width: DOMString) -> ErrorResult {
Ok(())
}
}
|
SetName
|
identifier_name
|
htmlappletelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLAppletElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLAppletElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLAppletElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLAppletElement {
htmlelement: HTMLElement
}
impl HTMLAppletElementDerived for EventTarget {
fn is_htmlappletelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLAppletElementTypeId)) => true,
_ => false
}
}
}
impl HTMLAppletElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLAppletElement {
HTMLAppletElement {
htmlelement: HTMLElement::new_inherited(HTMLAppletElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLAppletElement> {
let element = HTMLAppletElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLAppletElementBinding::Wrap)
}
}
impl HTMLAppletElement {
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&mut self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Alt(&self) -> DOMString {
~""
}
pub fn SetAlt(&self, _alt: DOMString) -> ErrorResult {
Ok(())
}
pub fn Archive(&self) -> DOMString {
~""
}
pub fn SetArchive(&self, _archive: DOMString) -> ErrorResult {
Ok(())
}
pub fn Code(&self) -> DOMString {
~""
}
pub fn SetCode(&self, _code: DOMString) -> ErrorResult {
Ok(())
}
pub fn CodeBase(&self) -> DOMString
|
pub fn SetCodeBase(&self, _code_base: DOMString) -> ErrorResult {
Ok(())
}
pub fn Height(&self) -> DOMString {
~""
}
pub fn SetHeight(&self, _height: DOMString) -> ErrorResult {
Ok(())
}
pub fn Hspace(&self) -> u32 {
0
}
pub fn SetHspace(&mut self, _hspace: u32) -> ErrorResult {
Ok(())
}
pub fn Name(&self) -> DOMString {
~""
}
pub fn SetName(&mut self, _name: DOMString) -> ErrorResult {
Ok(())
}
pub fn Object(&self) -> DOMString {
~""
}
pub fn SetObject(&mut self, _object: DOMString) -> ErrorResult {
Ok(())
}
pub fn Vspace(&self) -> u32 {
0
}
pub fn SetVspace(&mut self, _vspace: u32) -> ErrorResult {
Ok(())
}
pub fn Width(&self) -> DOMString {
~""
}
pub fn SetWidth(&mut self, _width: DOMString) -> ErrorResult {
Ok(())
}
}
|
{
~""
}
|
identifier_body
|
htmlappletelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLAppletElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLAppletElementDerived;
use dom::bindings::js::JS;
use dom::bindings::error::ErrorResult;
use dom::document::Document;
use dom::element::HTMLAppletElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLAppletElement {
htmlelement: HTMLElement
}
impl HTMLAppletElementDerived for EventTarget {
fn is_htmlappletelement(&self) -> bool {
match self.type_id {
NodeTargetTypeId(ElementNodeTypeId(HTMLAppletElementTypeId)) => true,
_ => false
}
}
}
impl HTMLAppletElement {
pub fn new_inherited(localName: DOMString, document: JS<Document>) -> HTMLAppletElement {
HTMLAppletElement {
htmlelement: HTMLElement::new_inherited(HTMLAppletElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JS<Document>) -> JS<HTMLAppletElement> {
let element = HTMLAppletElement::new_inherited(localName, document.clone());
Node::reflect_node(~element, document, HTMLAppletElementBinding::Wrap)
}
}
impl HTMLAppletElement {
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&mut self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Alt(&self) -> DOMString {
~""
}
pub fn SetAlt(&self, _alt: DOMString) -> ErrorResult {
Ok(())
}
pub fn Archive(&self) -> DOMString {
~""
}
pub fn SetArchive(&self, _archive: DOMString) -> ErrorResult {
Ok(())
}
pub fn Code(&self) -> DOMString {
~""
}
|
Ok(())
}
pub fn CodeBase(&self) -> DOMString {
~""
}
pub fn SetCodeBase(&self, _code_base: DOMString) -> ErrorResult {
Ok(())
}
pub fn Height(&self) -> DOMString {
~""
}
pub fn SetHeight(&self, _height: DOMString) -> ErrorResult {
Ok(())
}
pub fn Hspace(&self) -> u32 {
0
}
pub fn SetHspace(&mut self, _hspace: u32) -> ErrorResult {
Ok(())
}
pub fn Name(&self) -> DOMString {
~""
}
pub fn SetName(&mut self, _name: DOMString) -> ErrorResult {
Ok(())
}
pub fn Object(&self) -> DOMString {
~""
}
pub fn SetObject(&mut self, _object: DOMString) -> ErrorResult {
Ok(())
}
pub fn Vspace(&self) -> u32 {
0
}
pub fn SetVspace(&mut self, _vspace: u32) -> ErrorResult {
Ok(())
}
pub fn Width(&self) -> DOMString {
~""
}
pub fn SetWidth(&mut self, _width: DOMString) -> ErrorResult {
Ok(())
}
}
|
pub fn SetCode(&self, _code: DOMString) -> ErrorResult {
|
random_line_split
|
macros.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Various macro helpers.
macro_rules! exclusive_value {
(($value:ident, $set:expr) => $ident:path) => {
if $value.intersects($set) {
return Err(());
} else {
$ident
|
}
#[cfg(feature = "gecko")]
macro_rules! impl_gecko_keyword_conversions {
($name:ident, $utype:ty) => {
impl From<$utype> for $name {
fn from(bits: $utype) -> $name {
$name::from_gecko_keyword(bits)
}
}
impl From<$name> for $utype {
fn from(v: $name) -> $utype {
v.to_gecko_keyword()
}
}
};
}
macro_rules! trivial_to_computed_value {
($name:ty) => {
impl $crate::values::computed::ToComputedValue for $name {
type ComputedValue = $name;
fn to_computed_value(&self, _: &$crate::values::computed::Context) -> Self {
self.clone()
}
fn from_computed_value(other: &Self) -> Self {
other.clone()
}
}
};
}
/// A macro to parse an identifier, or return an `UnexpectedIdent` error
/// otherwise.
///
/// FIXME(emilio): The fact that `UnexpectedIdent` is a `SelectorParseError`
/// doesn't make a lot of sense to me.
macro_rules! try_match_ident_ignore_ascii_case {
($input:expr, $( $match_body:tt )*) => {{
let location = $input.current_source_location();
let ident = $input.expect_ident_cloned()?;
match_ignore_ascii_case! { &ident,
$( $match_body )*
_ => return Err(location.new_custom_error(
::selectors::parser::SelectorParseErrorKind::UnexpectedIdent(ident.clone())
))
}
}}
}
macro_rules! define_keyword_type {
($name:ident, $css:expr) => {
#[allow(missing_docs)]
#[derive(Animate, Clone, ComputeSquaredDistance, Copy, MallocSizeOf,
PartialEq, SpecifiedValueInfo, ToAnimatedValue, ToAnimatedZero,
ToComputedValue, ToCss)]
pub struct $name;
impl fmt::Debug for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str($css)
}
}
impl $crate::parser::Parse for $name {
fn parse<'i, 't>(
_context: &$crate::parser::ParserContext,
input: &mut ::cssparser::Parser<'i, 't>,
) -> Result<$name, ::style_traits::ParseError<'i>> {
input
.expect_ident_matching($css)
.map(|_| $name)
.map_err(|e| e.into())
}
}
};
}
#[cfg(feature = "gecko")]
macro_rules! impl_bitflags_conversions {
($name:ident) => {
impl From<u8> for $name {
fn from(bits: u8) -> $name {
$name::from_bits(bits).expect("bits contain valid flag")
}
}
impl From<$name> for u8 {
fn from(v: $name) -> u8 {
v.bits()
}
}
};
}
|
}
};
|
random_line_split
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
#[macro_use] extern crate contacts;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate serde_derive;
extern crate postgres;
extern crate r2d2;
extern crate r2d2_postgres;
extern crate rocket;
extern crate rocket_contrib;
extern crate serde;
extern crate serde_json;
extern crate uuid;
use std::collections::HashMap;
use r2d2::{Pool, PooledConnection, GetTimeout};
use r2d2_postgres::PostgresConnectionManager;
use rocket::Outcome::{Success, Failure, Forward};
use rocket::Request;
use rocket::http::{Cookie, Cookies, Status};
use rocket::request::{Form, FromRequest, Outcome};
use rocket::response::Redirect;
use rocket_contrib::{Template, UUID};
use uuid::Uuid;
use contacts::config;
use contacts::errors::*;
use contacts::models::{Person, Session, Contact, as_brand};
lazy_static! {
pub static ref DB_POOL: Pool<PostgresConnectionManager> = contacts::create_db_pool().unwrap();
}
pub struct DB(PooledConnection<PostgresConnectionManager>);
impl DB {
pub fn conn(&self) -> &postgres::Connection {
&*self.0
}
}
impl<'a, 'r> FromRequest<'a, 'r> for DB {
type Error = GetTimeout;
fn from_request(_: &'a Request<'r>) -> Outcome<Self, Self::Error> {
match DB_POOL.get() {
Ok(conn) => Success(DB(conn)),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct Email {
email: String,
}
#[post("/login", data="<form>")]
fn login(form: Form<Email>, cookies: &Cookies, db: DB) -> Result<Template> {
let &Email { ref email } = form.get();
// if we start an auth flow, kill whatever session may exist
cookies.remove("session");
let res = find!(db,
"SELECT * FROM PEOPLE WHERE people.email = $1",
&email
).map(Person::from_row);
let (me, new) = match res {
Some(me) => (me, false),
None => {
let me = find!(db,
"INSERT INTO PEOPLE (email) VALUES ($1) RETURNING *",
&email)
.map(Person::from_row)
.ok_or("could not create person")?;
(me, true)
}
};
let login_key: Uuid = find!(db,
"INSERT INTO sessions (account) VALUES ($1) RETURNING login_key",
&me.id)
.ok_or("could not insert session")?
.get(0);
contacts::send_login(email, &login_key, new)?;
let mut context = HashMap::new();
context.insert("email", email);
Ok(Template::render("login", &context))
}
#[derive(Debug, FromForm)]
struct LoginKey {
key: UUID,
}
#[get("/login?<form>")]
fn finish_login(form: LoginKey, cookies: &Cookies, db: DB) -> Result<Redirect> {
let LoginKey { ref key } = form;
// if we are in auth flow, kill whatever session may exist
cookies.remove("session");
let session = find!(db,
"SELECT * FROM sessions WHERE login_key = $1",
&key.into_inner())
.map(Session::from_row)
.ok_or("missing session")?;
if session.session_id.is_some() {
bail!("already got this session whoops");
}
let id: Uuid = find!(db,
" UPDATE sessions
SET session_id = uuid_generate_v4()
WHERE login_key = $1
RETURNING session_id",
&key.into_inner())
.ok_or("failed to set session_id")?
.get(0);
let cookie = Cookie::build("session", id.to_string())
//.domain(blah)
.path("/")
//.secure(true)
.http_only(true)
.finish();
cookies.add(cookie);
Ok(Redirect::to("/"))
}
#[derive(Debug)]
struct Me(Person);
fn get_me(cookies: &Cookies) -> Result<Option<Me>> {
let cookie = match cookies.find("session") {
Some(c) => c,
None => {
return Ok(None)
}
};
let claimed_id: Uuid = cookie.value().parse()
.chain_err(|| "Invalid session cookie")?;
let db = DB(DB_POOL.get()?);
let me = find!(db,
"SELECT p.*
FROM people AS p,
sessions AS s
WHERE s.account = p.id
AND s.session_id = $1",
&claimed_id)
.map(|row| Me(Person::from_row(row)));
Ok(me)
}
impl<'a, 'r> FromRequest<'a, 'r> for Me {
type Error = Error;
fn
|
(request: &'a Request<'r>) -> Outcome<Me, Self::Error> {
match get_me(request.cookies()) {
Ok(Some(me)) => Success(me),
Ok(None) => Forward(()),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct NewContactForm {
name: String,
info: String,
}
#[post("/contacts", data="<form>")]
fn new_contact(form: Form<NewContactForm>, me: Me, db: DB) -> Result<Redirect> {
let &NewContactForm { ref name, ref info } = form.get();
write!(db, "INSERT INTO contacts (account, name, info)
VALUES ($1, $2, $3)",
&me.0.id, &name, &info);
Ok(Redirect::to("/"))
}
#[derive(Debug, FromForm)]
struct DeleteContactForm {
id: UUID,
next: Option<String>,
}
#[get("/contacts/delete?<form>")]
fn delete_contact(form: DeleteContactForm, me: Me, db: DB) -> Result<Redirect> {
let DeleteContactForm { id, next } = form;
write!(db, "DELETE FROM contacts WHERE id = $1 AND account = $2",
&id.into_inner(), &me.0.id);
Ok(Redirect::to(&next.unwrap_or("/".into())))
}
#[derive(Debug, FromForm)]
#[allow(non_snake_case)]
pub struct StripeSubscribe {
stripeToken: String,
stripeTokenType: String,
stripeEmail: String,
stripeBillingName: String,
stripeBillingAddressLine1: String,
stripeBillingAddressZip: String,
stripeBillingAddressState: String,
stripeBillingAddressCity: String,
stripeBillingAddressCountry: String,
stripeBillingAddressCountryCode: String,
stripeShippingName: String,
stripeShippingAddressLine1: String,
stripeShippingAddressZip: String,
stripeShippingAddressState: String,
stripeShippingAddressCity: String,
stripeShippingAddressCountry: String,
stripeShippingAddressCountryCode: String,
}
#[post("/subscriptions", data="<form>")]
fn subscribe(form: Form<StripeSubscribe>, me: Me, db: DB) -> Result<Redirect> {
let data = form.get();
write!(db, "UPDATE people
SET address = ($2, $3, $4, $5, $6, $7)
WHERE id = $1",
&me.0.id,
&data.stripeShippingName,
&data.stripeShippingAddressLine1,
&data.stripeShippingAddressZip,
&data.stripeShippingAddressCity,
&data.stripeShippingAddressState,
&data.stripeShippingAddressCountry);
let subscriber = contacts::create_customer(&data.stripeToken, &me.0)?;
write!(db, "UPDATE people SET customer = $1 WHERE id = $2",
&subscriber.id, &me.0.id);
let ref source = subscriber.sources.data[0];
write!(db, "INSERT INTO cards (id, brand, country, customer, last4, name)
VALUES ($1, $2, $3, $4, $5, $6)",
&source.id,
&as_brand(&source.brand),
&source.country,
&source.customer,
&source.last4,
&source.name);
Ok(Redirect::to("/"))
}
#[derive(Serialize)]
struct HomeData<'a> {
me: &'a Person,
contacts: &'a [Contact],
current_path: &'a str,
stripe_public_key: &'a str,
}
#[get("/")]
fn home(me: Me, db: DB) -> Result<Template> {
let stripe_public_key: &str = &config::stripe_secret();
let contacts = filter!(db,
"SELECT * FROM contacts WHERE account = $1",
&me.0.id)
.map(Contact::from_row)
.collect::<Vec<_>>();
let context = HomeData {
me: &me.0,
contacts: &contacts,
current_path: "/",
stripe_public_key,
};
Ok(Template::render("home", &context))
}
#[derive(Serialize)]
pub struct NoContext {}
#[get("/", rank = 2)]
fn index() -> Template {
Template::render("index", &NoContext {})
}
#[get("/logout")]
fn logout(cookies: &Cookies) -> Redirect {
cookies.remove("session");
Redirect::to("/")
}
#[error(404)]
fn not_found() -> Template {
Template::render("error-pages/404-not-found", &NoContext {})
}
#[error(500)]
fn internal_server_error() -> Template {
Template::render("error-pages/500-internal-server-error", &NoContext {})
}
#[error(503)]
fn service_unavailable() -> Template {
Template::render("error-pages/503-service-unavailable", &NoContext {})
}
fn main() {
config::check();
rocket::ignite()
.mount("/", routes![
index,
login,
finish_login,
home,
logout,
new_contact,
delete_contact,
subscribe,
])
.catch(errors![
not_found,
internal_server_error,
service_unavailable,
])
.launch();
}
|
from_request
|
identifier_name
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
#[macro_use] extern crate contacts;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate serde_derive;
extern crate postgres;
extern crate r2d2;
extern crate r2d2_postgres;
extern crate rocket;
extern crate rocket_contrib;
extern crate serde;
extern crate serde_json;
extern crate uuid;
use std::collections::HashMap;
use r2d2::{Pool, PooledConnection, GetTimeout};
use r2d2_postgres::PostgresConnectionManager;
use rocket::Outcome::{Success, Failure, Forward};
use rocket::Request;
use rocket::http::{Cookie, Cookies, Status};
use rocket::request::{Form, FromRequest, Outcome};
use rocket::response::Redirect;
use rocket_contrib::{Template, UUID};
use uuid::Uuid;
use contacts::config;
use contacts::errors::*;
use contacts::models::{Person, Session, Contact, as_brand};
lazy_static! {
pub static ref DB_POOL: Pool<PostgresConnectionManager> = contacts::create_db_pool().unwrap();
}
pub struct DB(PooledConnection<PostgresConnectionManager>);
impl DB {
pub fn conn(&self) -> &postgres::Connection {
&*self.0
}
}
impl<'a, 'r> FromRequest<'a, 'r> for DB {
type Error = GetTimeout;
fn from_request(_: &'a Request<'r>) -> Outcome<Self, Self::Error> {
match DB_POOL.get() {
Ok(conn) => Success(DB(conn)),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct Email {
email: String,
}
#[post("/login", data="<form>")]
fn login(form: Form<Email>, cookies: &Cookies, db: DB) -> Result<Template> {
let &Email { ref email } = form.get();
// if we start an auth flow, kill whatever session may exist
cookies.remove("session");
let res = find!(db,
"SELECT * FROM PEOPLE WHERE people.email = $1",
&email
).map(Person::from_row);
let (me, new) = match res {
Some(me) => (me, false),
None => {
let me = find!(db,
"INSERT INTO PEOPLE (email) VALUES ($1) RETURNING *",
&email)
.map(Person::from_row)
.ok_or("could not create person")?;
(me, true)
}
};
let login_key: Uuid = find!(db,
"INSERT INTO sessions (account) VALUES ($1) RETURNING login_key",
&me.id)
.ok_or("could not insert session")?
.get(0);
contacts::send_login(email, &login_key, new)?;
let mut context = HashMap::new();
context.insert("email", email);
Ok(Template::render("login", &context))
}
#[derive(Debug, FromForm)]
struct LoginKey {
key: UUID,
}
#[get("/login?<form>")]
fn finish_login(form: LoginKey, cookies: &Cookies, db: DB) -> Result<Redirect> {
let LoginKey { ref key } = form;
// if we are in auth flow, kill whatever session may exist
cookies.remove("session");
let session = find!(db,
"SELECT * FROM sessions WHERE login_key = $1",
&key.into_inner())
.map(Session::from_row)
.ok_or("missing session")?;
if session.session_id.is_some() {
bail!("already got this session whoops");
}
let id: Uuid = find!(db,
" UPDATE sessions
SET session_id = uuid_generate_v4()
|
.get(0);
let cookie = Cookie::build("session", id.to_string())
//.domain(blah)
.path("/")
//.secure(true)
.http_only(true)
.finish();
cookies.add(cookie);
Ok(Redirect::to("/"))
}
#[derive(Debug)]
struct Me(Person);
fn get_me(cookies: &Cookies) -> Result<Option<Me>> {
let cookie = match cookies.find("session") {
Some(c) => c,
None => {
return Ok(None)
}
};
let claimed_id: Uuid = cookie.value().parse()
.chain_err(|| "Invalid session cookie")?;
let db = DB(DB_POOL.get()?);
let me = find!(db,
"SELECT p.*
FROM people AS p,
sessions AS s
WHERE s.account = p.id
AND s.session_id = $1",
&claimed_id)
.map(|row| Me(Person::from_row(row)));
Ok(me)
}
impl<'a, 'r> FromRequest<'a, 'r> for Me {
type Error = Error;
fn from_request(request: &'a Request<'r>) -> Outcome<Me, Self::Error> {
match get_me(request.cookies()) {
Ok(Some(me)) => Success(me),
Ok(None) => Forward(()),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct NewContactForm {
name: String,
info: String,
}
#[post("/contacts", data="<form>")]
fn new_contact(form: Form<NewContactForm>, me: Me, db: DB) -> Result<Redirect> {
let &NewContactForm { ref name, ref info } = form.get();
write!(db, "INSERT INTO contacts (account, name, info)
VALUES ($1, $2, $3)",
&me.0.id, &name, &info);
Ok(Redirect::to("/"))
}
#[derive(Debug, FromForm)]
struct DeleteContactForm {
id: UUID,
next: Option<String>,
}
#[get("/contacts/delete?<form>")]
fn delete_contact(form: DeleteContactForm, me: Me, db: DB) -> Result<Redirect> {
let DeleteContactForm { id, next } = form;
write!(db, "DELETE FROM contacts WHERE id = $1 AND account = $2",
&id.into_inner(), &me.0.id);
Ok(Redirect::to(&next.unwrap_or("/".into())))
}
#[derive(Debug, FromForm)]
#[allow(non_snake_case)]
pub struct StripeSubscribe {
stripeToken: String,
stripeTokenType: String,
stripeEmail: String,
stripeBillingName: String,
stripeBillingAddressLine1: String,
stripeBillingAddressZip: String,
stripeBillingAddressState: String,
stripeBillingAddressCity: String,
stripeBillingAddressCountry: String,
stripeBillingAddressCountryCode: String,
stripeShippingName: String,
stripeShippingAddressLine1: String,
stripeShippingAddressZip: String,
stripeShippingAddressState: String,
stripeShippingAddressCity: String,
stripeShippingAddressCountry: String,
stripeShippingAddressCountryCode: String,
}
#[post("/subscriptions", data="<form>")]
fn subscribe(form: Form<StripeSubscribe>, me: Me, db: DB) -> Result<Redirect> {
let data = form.get();
write!(db, "UPDATE people
SET address = ($2, $3, $4, $5, $6, $7)
WHERE id = $1",
&me.0.id,
&data.stripeShippingName,
&data.stripeShippingAddressLine1,
&data.stripeShippingAddressZip,
&data.stripeShippingAddressCity,
&data.stripeShippingAddressState,
&data.stripeShippingAddressCountry);
let subscriber = contacts::create_customer(&data.stripeToken, &me.0)?;
write!(db, "UPDATE people SET customer = $1 WHERE id = $2",
&subscriber.id, &me.0.id);
let ref source = subscriber.sources.data[0];
write!(db, "INSERT INTO cards (id, brand, country, customer, last4, name)
VALUES ($1, $2, $3, $4, $5, $6)",
&source.id,
&as_brand(&source.brand),
&source.country,
&source.customer,
&source.last4,
&source.name);
Ok(Redirect::to("/"))
}
#[derive(Serialize)]
struct HomeData<'a> {
me: &'a Person,
contacts: &'a [Contact],
current_path: &'a str,
stripe_public_key: &'a str,
}
#[get("/")]
fn home(me: Me, db: DB) -> Result<Template> {
let stripe_public_key: &str = &config::stripe_secret();
let contacts = filter!(db,
"SELECT * FROM contacts WHERE account = $1",
&me.0.id)
.map(Contact::from_row)
.collect::<Vec<_>>();
let context = HomeData {
me: &me.0,
contacts: &contacts,
current_path: "/",
stripe_public_key,
};
Ok(Template::render("home", &context))
}
#[derive(Serialize)]
pub struct NoContext {}
#[get("/", rank = 2)]
fn index() -> Template {
Template::render("index", &NoContext {})
}
#[get("/logout")]
fn logout(cookies: &Cookies) -> Redirect {
cookies.remove("session");
Redirect::to("/")
}
#[error(404)]
fn not_found() -> Template {
Template::render("error-pages/404-not-found", &NoContext {})
}
#[error(500)]
fn internal_server_error() -> Template {
Template::render("error-pages/500-internal-server-error", &NoContext {})
}
#[error(503)]
fn service_unavailable() -> Template {
Template::render("error-pages/503-service-unavailable", &NoContext {})
}
fn main() {
config::check();
rocket::ignite()
.mount("/", routes![
index,
login,
finish_login,
home,
logout,
new_contact,
delete_contact,
subscribe,
])
.catch(errors![
not_found,
internal_server_error,
service_unavailable,
])
.launch();
}
|
WHERE login_key = $1
RETURNING session_id",
&key.into_inner())
.ok_or("failed to set session_id")?
|
random_line_split
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
#[macro_use] extern crate contacts;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate serde_derive;
extern crate postgres;
extern crate r2d2;
extern crate r2d2_postgres;
extern crate rocket;
extern crate rocket_contrib;
extern crate serde;
extern crate serde_json;
extern crate uuid;
use std::collections::HashMap;
use r2d2::{Pool, PooledConnection, GetTimeout};
use r2d2_postgres::PostgresConnectionManager;
use rocket::Outcome::{Success, Failure, Forward};
use rocket::Request;
use rocket::http::{Cookie, Cookies, Status};
use rocket::request::{Form, FromRequest, Outcome};
use rocket::response::Redirect;
use rocket_contrib::{Template, UUID};
use uuid::Uuid;
use contacts::config;
use contacts::errors::*;
use contacts::models::{Person, Session, Contact, as_brand};
lazy_static! {
pub static ref DB_POOL: Pool<PostgresConnectionManager> = contacts::create_db_pool().unwrap();
}
pub struct DB(PooledConnection<PostgresConnectionManager>);
impl DB {
pub fn conn(&self) -> &postgres::Connection {
&*self.0
}
}
impl<'a, 'r> FromRequest<'a, 'r> for DB {
type Error = GetTimeout;
fn from_request(_: &'a Request<'r>) -> Outcome<Self, Self::Error> {
match DB_POOL.get() {
Ok(conn) => Success(DB(conn)),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct Email {
email: String,
}
#[post("/login", data="<form>")]
fn login(form: Form<Email>, cookies: &Cookies, db: DB) -> Result<Template>
|
}
};
let login_key: Uuid = find!(db,
"INSERT INTO sessions (account) VALUES ($1) RETURNING login_key",
&me.id)
.ok_or("could not insert session")?
.get(0);
contacts::send_login(email, &login_key, new)?;
let mut context = HashMap::new();
context.insert("email", email);
Ok(Template::render("login", &context))
}
#[derive(Debug, FromForm)]
struct LoginKey {
key: UUID,
}
#[get("/login?<form>")]
fn finish_login(form: LoginKey, cookies: &Cookies, db: DB) -> Result<Redirect> {
let LoginKey { ref key } = form;
// if we are in auth flow, kill whatever session may exist
cookies.remove("session");
let session = find!(db,
"SELECT * FROM sessions WHERE login_key = $1",
&key.into_inner())
.map(Session::from_row)
.ok_or("missing session")?;
if session.session_id.is_some() {
bail!("already got this session whoops");
}
let id: Uuid = find!(db,
" UPDATE sessions
SET session_id = uuid_generate_v4()
WHERE login_key = $1
RETURNING session_id",
&key.into_inner())
.ok_or("failed to set session_id")?
.get(0);
let cookie = Cookie::build("session", id.to_string())
//.domain(blah)
.path("/")
//.secure(true)
.http_only(true)
.finish();
cookies.add(cookie);
Ok(Redirect::to("/"))
}
#[derive(Debug)]
struct Me(Person);
fn get_me(cookies: &Cookies) -> Result<Option<Me>> {
let cookie = match cookies.find("session") {
Some(c) => c,
None => {
return Ok(None)
}
};
let claimed_id: Uuid = cookie.value().parse()
.chain_err(|| "Invalid session cookie")?;
let db = DB(DB_POOL.get()?);
let me = find!(db,
"SELECT p.*
FROM people AS p,
sessions AS s
WHERE s.account = p.id
AND s.session_id = $1",
&claimed_id)
.map(|row| Me(Person::from_row(row)));
Ok(me)
}
impl<'a, 'r> FromRequest<'a, 'r> for Me {
type Error = Error;
fn from_request(request: &'a Request<'r>) -> Outcome<Me, Self::Error> {
match get_me(request.cookies()) {
Ok(Some(me)) => Success(me),
Ok(None) => Forward(()),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct NewContactForm {
name: String,
info: String,
}
#[post("/contacts", data="<form>")]
fn new_contact(form: Form<NewContactForm>, me: Me, db: DB) -> Result<Redirect> {
let &NewContactForm { ref name, ref info } = form.get();
write!(db, "INSERT INTO contacts (account, name, info)
VALUES ($1, $2, $3)",
&me.0.id, &name, &info);
Ok(Redirect::to("/"))
}
#[derive(Debug, FromForm)]
struct DeleteContactForm {
id: UUID,
next: Option<String>,
}
#[get("/contacts/delete?<form>")]
fn delete_contact(form: DeleteContactForm, me: Me, db: DB) -> Result<Redirect> {
let DeleteContactForm { id, next } = form;
write!(db, "DELETE FROM contacts WHERE id = $1 AND account = $2",
&id.into_inner(), &me.0.id);
Ok(Redirect::to(&next.unwrap_or("/".into())))
}
#[derive(Debug, FromForm)]
#[allow(non_snake_case)]
pub struct StripeSubscribe {
stripeToken: String,
stripeTokenType: String,
stripeEmail: String,
stripeBillingName: String,
stripeBillingAddressLine1: String,
stripeBillingAddressZip: String,
stripeBillingAddressState: String,
stripeBillingAddressCity: String,
stripeBillingAddressCountry: String,
stripeBillingAddressCountryCode: String,
stripeShippingName: String,
stripeShippingAddressLine1: String,
stripeShippingAddressZip: String,
stripeShippingAddressState: String,
stripeShippingAddressCity: String,
stripeShippingAddressCountry: String,
stripeShippingAddressCountryCode: String,
}
#[post("/subscriptions", data="<form>")]
fn subscribe(form: Form<StripeSubscribe>, me: Me, db: DB) -> Result<Redirect> {
let data = form.get();
write!(db, "UPDATE people
SET address = ($2, $3, $4, $5, $6, $7)
WHERE id = $1",
&me.0.id,
&data.stripeShippingName,
&data.stripeShippingAddressLine1,
&data.stripeShippingAddressZip,
&data.stripeShippingAddressCity,
&data.stripeShippingAddressState,
&data.stripeShippingAddressCountry);
let subscriber = contacts::create_customer(&data.stripeToken, &me.0)?;
write!(db, "UPDATE people SET customer = $1 WHERE id = $2",
&subscriber.id, &me.0.id);
let ref source = subscriber.sources.data[0];
write!(db, "INSERT INTO cards (id, brand, country, customer, last4, name)
VALUES ($1, $2, $3, $4, $5, $6)",
&source.id,
&as_brand(&source.brand),
&source.country,
&source.customer,
&source.last4,
&source.name);
Ok(Redirect::to("/"))
}
#[derive(Serialize)]
struct HomeData<'a> {
me: &'a Person,
contacts: &'a [Contact],
current_path: &'a str,
stripe_public_key: &'a str,
}
#[get("/")]
fn home(me: Me, db: DB) -> Result<Template> {
let stripe_public_key: &str = &config::stripe_secret();
let contacts = filter!(db,
"SELECT * FROM contacts WHERE account = $1",
&me.0.id)
.map(Contact::from_row)
.collect::<Vec<_>>();
let context = HomeData {
me: &me.0,
contacts: &contacts,
current_path: "/",
stripe_public_key,
};
Ok(Template::render("home", &context))
}
#[derive(Serialize)]
pub struct NoContext {}
#[get("/", rank = 2)]
fn index() -> Template {
Template::render("index", &NoContext {})
}
#[get("/logout")]
fn logout(cookies: &Cookies) -> Redirect {
cookies.remove("session");
Redirect::to("/")
}
#[error(404)]
fn not_found() -> Template {
Template::render("error-pages/404-not-found", &NoContext {})
}
#[error(500)]
fn internal_server_error() -> Template {
Template::render("error-pages/500-internal-server-error", &NoContext {})
}
#[error(503)]
fn service_unavailable() -> Template {
Template::render("error-pages/503-service-unavailable", &NoContext {})
}
fn main() {
config::check();
rocket::ignite()
.mount("/", routes![
index,
login,
finish_login,
home,
logout,
new_contact,
delete_contact,
subscribe,
])
.catch(errors![
not_found,
internal_server_error,
service_unavailable,
])
.launch();
}
|
{
let &Email { ref email } = form.get();
// if we start an auth flow, kill whatever session may exist
cookies.remove("session");
let res = find!(db,
"SELECT * FROM PEOPLE WHERE people.email = $1",
&email
).map(Person::from_row);
let (me, new) = match res {
Some(me) => (me, false),
None => {
let me = find!(db,
"INSERT INTO PEOPLE (email) VALUES ($1) RETURNING *",
&email)
.map(Person::from_row)
.ok_or("could not create person")?;
(me, true)
|
identifier_body
|
main.rs
|
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]
#[macro_use] extern crate contacts;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate serde_derive;
extern crate postgres;
extern crate r2d2;
extern crate r2d2_postgres;
extern crate rocket;
extern crate rocket_contrib;
extern crate serde;
extern crate serde_json;
extern crate uuid;
use std::collections::HashMap;
use r2d2::{Pool, PooledConnection, GetTimeout};
use r2d2_postgres::PostgresConnectionManager;
use rocket::Outcome::{Success, Failure, Forward};
use rocket::Request;
use rocket::http::{Cookie, Cookies, Status};
use rocket::request::{Form, FromRequest, Outcome};
use rocket::response::Redirect;
use rocket_contrib::{Template, UUID};
use uuid::Uuid;
use contacts::config;
use contacts::errors::*;
use contacts::models::{Person, Session, Contact, as_brand};
lazy_static! {
pub static ref DB_POOL: Pool<PostgresConnectionManager> = contacts::create_db_pool().unwrap();
}
pub struct DB(PooledConnection<PostgresConnectionManager>);
impl DB {
pub fn conn(&self) -> &postgres::Connection {
&*self.0
}
}
impl<'a, 'r> FromRequest<'a, 'r> for DB {
type Error = GetTimeout;
fn from_request(_: &'a Request<'r>) -> Outcome<Self, Self::Error> {
match DB_POOL.get() {
Ok(conn) => Success(DB(conn)),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct Email {
email: String,
}
#[post("/login", data="<form>")]
fn login(form: Form<Email>, cookies: &Cookies, db: DB) -> Result<Template> {
let &Email { ref email } = form.get();
// if we start an auth flow, kill whatever session may exist
cookies.remove("session");
let res = find!(db,
"SELECT * FROM PEOPLE WHERE people.email = $1",
&email
).map(Person::from_row);
let (me, new) = match res {
Some(me) => (me, false),
None =>
|
};
let login_key: Uuid = find!(db,
"INSERT INTO sessions (account) VALUES ($1) RETURNING login_key",
&me.id)
.ok_or("could not insert session")?
.get(0);
contacts::send_login(email, &login_key, new)?;
let mut context = HashMap::new();
context.insert("email", email);
Ok(Template::render("login", &context))
}
#[derive(Debug, FromForm)]
struct LoginKey {
key: UUID,
}
#[get("/login?<form>")]
fn finish_login(form: LoginKey, cookies: &Cookies, db: DB) -> Result<Redirect> {
let LoginKey { ref key } = form;
// if we are in auth flow, kill whatever session may exist
cookies.remove("session");
let session = find!(db,
"SELECT * FROM sessions WHERE login_key = $1",
&key.into_inner())
.map(Session::from_row)
.ok_or("missing session")?;
if session.session_id.is_some() {
bail!("already got this session whoops");
}
let id: Uuid = find!(db,
" UPDATE sessions
SET session_id = uuid_generate_v4()
WHERE login_key = $1
RETURNING session_id",
&key.into_inner())
.ok_or("failed to set session_id")?
.get(0);
let cookie = Cookie::build("session", id.to_string())
//.domain(blah)
.path("/")
//.secure(true)
.http_only(true)
.finish();
cookies.add(cookie);
Ok(Redirect::to("/"))
}
#[derive(Debug)]
struct Me(Person);
fn get_me(cookies: &Cookies) -> Result<Option<Me>> {
let cookie = match cookies.find("session") {
Some(c) => c,
None => {
return Ok(None)
}
};
let claimed_id: Uuid = cookie.value().parse()
.chain_err(|| "Invalid session cookie")?;
let db = DB(DB_POOL.get()?);
let me = find!(db,
"SELECT p.*
FROM people AS p,
sessions AS s
WHERE s.account = p.id
AND s.session_id = $1",
&claimed_id)
.map(|row| Me(Person::from_row(row)));
Ok(me)
}
impl<'a, 'r> FromRequest<'a, 'r> for Me {
type Error = Error;
fn from_request(request: &'a Request<'r>) -> Outcome<Me, Self::Error> {
match get_me(request.cookies()) {
Ok(Some(me)) => Success(me),
Ok(None) => Forward(()),
Err(e) => Failure((Status::ServiceUnavailable, e)),
}
}
}
#[derive(Debug, FromForm)]
struct NewContactForm {
name: String,
info: String,
}
#[post("/contacts", data="<form>")]
fn new_contact(form: Form<NewContactForm>, me: Me, db: DB) -> Result<Redirect> {
let &NewContactForm { ref name, ref info } = form.get();
write!(db, "INSERT INTO contacts (account, name, info)
VALUES ($1, $2, $3)",
&me.0.id, &name, &info);
Ok(Redirect::to("/"))
}
#[derive(Debug, FromForm)]
struct DeleteContactForm {
id: UUID,
next: Option<String>,
}
#[get("/contacts/delete?<form>")]
fn delete_contact(form: DeleteContactForm, me: Me, db: DB) -> Result<Redirect> {
let DeleteContactForm { id, next } = form;
write!(db, "DELETE FROM contacts WHERE id = $1 AND account = $2",
&id.into_inner(), &me.0.id);
Ok(Redirect::to(&next.unwrap_or("/".into())))
}
#[derive(Debug, FromForm)]
#[allow(non_snake_case)]
pub struct StripeSubscribe {
stripeToken: String,
stripeTokenType: String,
stripeEmail: String,
stripeBillingName: String,
stripeBillingAddressLine1: String,
stripeBillingAddressZip: String,
stripeBillingAddressState: String,
stripeBillingAddressCity: String,
stripeBillingAddressCountry: String,
stripeBillingAddressCountryCode: String,
stripeShippingName: String,
stripeShippingAddressLine1: String,
stripeShippingAddressZip: String,
stripeShippingAddressState: String,
stripeShippingAddressCity: String,
stripeShippingAddressCountry: String,
stripeShippingAddressCountryCode: String,
}
#[post("/subscriptions", data="<form>")]
fn subscribe(form: Form<StripeSubscribe>, me: Me, db: DB) -> Result<Redirect> {
let data = form.get();
write!(db, "UPDATE people
SET address = ($2, $3, $4, $5, $6, $7)
WHERE id = $1",
&me.0.id,
&data.stripeShippingName,
&data.stripeShippingAddressLine1,
&data.stripeShippingAddressZip,
&data.stripeShippingAddressCity,
&data.stripeShippingAddressState,
&data.stripeShippingAddressCountry);
let subscriber = contacts::create_customer(&data.stripeToken, &me.0)?;
write!(db, "UPDATE people SET customer = $1 WHERE id = $2",
&subscriber.id, &me.0.id);
let ref source = subscriber.sources.data[0];
write!(db, "INSERT INTO cards (id, brand, country, customer, last4, name)
VALUES ($1, $2, $3, $4, $5, $6)",
&source.id,
&as_brand(&source.brand),
&source.country,
&source.customer,
&source.last4,
&source.name);
Ok(Redirect::to("/"))
}
#[derive(Serialize)]
struct HomeData<'a> {
me: &'a Person,
contacts: &'a [Contact],
current_path: &'a str,
stripe_public_key: &'a str,
}
#[get("/")]
fn home(me: Me, db: DB) -> Result<Template> {
let stripe_public_key: &str = &config::stripe_secret();
let contacts = filter!(db,
"SELECT * FROM contacts WHERE account = $1",
&me.0.id)
.map(Contact::from_row)
.collect::<Vec<_>>();
let context = HomeData {
me: &me.0,
contacts: &contacts,
current_path: "/",
stripe_public_key,
};
Ok(Template::render("home", &context))
}
#[derive(Serialize)]
pub struct NoContext {}
#[get("/", rank = 2)]
fn index() -> Template {
Template::render("index", &NoContext {})
}
#[get("/logout")]
fn logout(cookies: &Cookies) -> Redirect {
cookies.remove("session");
Redirect::to("/")
}
#[error(404)]
fn not_found() -> Template {
Template::render("error-pages/404-not-found", &NoContext {})
}
#[error(500)]
fn internal_server_error() -> Template {
Template::render("error-pages/500-internal-server-error", &NoContext {})
}
#[error(503)]
fn service_unavailable() -> Template {
Template::render("error-pages/503-service-unavailable", &NoContext {})
}
fn main() {
config::check();
rocket::ignite()
.mount("/", routes![
index,
login,
finish_login,
home,
logout,
new_contact,
delete_contact,
subscribe,
])
.catch(errors![
not_found,
internal_server_error,
service_unavailable,
])
.launch();
}
|
{
let me = find!(db,
"INSERT INTO PEOPLE (email) VALUES ($1) RETURNING *",
&email)
.map(Person::from_row)
.ok_or("could not create person")?;
(me, true)
}
|
conditional_block
|
borrowck-scope-of-deref-issue-4666.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that the scope of the pointer returned from `get()` is
// limited to the deref operation itself, and does not infect the
// block as a whole.
struct Box {
x: uint
}
impl Box {
fn
|
<'a>(&'a self) -> &'a uint {
&self.x
}
fn set(&mut self, x: uint) {
self.x = x;
}
}
fn fun1() {
// in the past, borrow checker behaved differently when
// init and decl of `v` were distinct
let v;
let mut a_box = Box {x: 0};
a_box.set(22);
v = *a_box.get();
a_box.set(v+1);
assert_eq!(23, *a_box.get());
}
fn fun2() {
let mut a_box = Box {x: 0};
a_box.set(22);
let v = *a_box.get();
a_box.set(v+1);
assert_eq!(23, *a_box.get());
}
pub fn main() {
fun1();
fun2();
}
|
get
|
identifier_name
|
borrowck-scope-of-deref-issue-4666.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that the scope of the pointer returned from `get()` is
// limited to the deref operation itself, and does not infect the
// block as a whole.
struct Box {
x: uint
}
impl Box {
fn get<'a>(&'a self) -> &'a uint {
&self.x
}
fn set(&mut self, x: uint) {
self.x = x;
}
}
fn fun1() {
// in the past, borrow checker behaved differently when
// init and decl of `v` were distinct
let v;
let mut a_box = Box {x: 0};
a_box.set(22);
v = *a_box.get();
a_box.set(v+1);
assert_eq!(23, *a_box.get());
}
fn fun2() {
let mut a_box = Box {x: 0};
a_box.set(22);
let v = *a_box.get();
a_box.set(v+1);
|
pub fn main() {
fun1();
fun2();
}
|
assert_eq!(23, *a_box.get());
}
|
random_line_split
|
borrowck-scope-of-deref-issue-4666.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that the scope of the pointer returned from `get()` is
// limited to the deref operation itself, and does not infect the
// block as a whole.
struct Box {
x: uint
}
impl Box {
fn get<'a>(&'a self) -> &'a uint {
&self.x
}
fn set(&mut self, x: uint) {
self.x = x;
}
}
fn fun1() {
// in the past, borrow checker behaved differently when
// init and decl of `v` were distinct
let v;
let mut a_box = Box {x: 0};
a_box.set(22);
v = *a_box.get();
a_box.set(v+1);
assert_eq!(23, *a_box.get());
}
fn fun2()
|
pub fn main() {
fun1();
fun2();
}
|
{
let mut a_box = Box {x: 0};
a_box.set(22);
let v = *a_box.get();
a_box.set(v+1);
assert_eq!(23, *a_box.get());
}
|
identifier_body
|
frames.rs
|
/* Copyright (C) 2017-2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::applayer::StreamSlice;
use crate::core::Flow;
#[repr(C)]
struct CFrame {
_private: [u8; 0],
}
// Defined in app-layer-register.h
extern {
fn AppLayerFrameNewByRelativeOffset(
flow: *const Flow, stream_slice: *const StreamSlice, frame_start_rel: u32, len: i64,
dir: i32, frame_type: u8,
) -> *const CFrame;
fn AppLayerFrameAddEventById(flow: *const Flow, dir: i32, id: i64, event: u8);
fn AppLayerFrameSetLengthById(flow: *const Flow, dir: i32, id: i64, len: i64);
fn AppLayerFrameSetTxIdById(flow: *const Flow, dir: i32, id: i64, tx_id: u64);
fn AppLayerFrameGetId(frame: *const CFrame) -> i64;
}
pub struct Frame {
pub id: i64,
}
impl std::fmt::Debug for Frame {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "frame: {}", self.id)
}
}
impl Frame {
pub fn new(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
dir: i32, frame_type: u8,
) -> Option<Self> {
let offset = frame_start.as_ptr() as usize - stream_slice.as_slice().as_ptr() as usize;
SCLogDebug!("offset {} stream_slice.len() {} frame_start.len() {}", offset, stream_slice.len(), frame_start.len());
let frame = unsafe {
AppLayerFrameNewByRelativeOffset(
flow,
stream_slice,
offset as u32,
frame_len,
dir,
frame_type,
)
};
let id = unsafe { AppLayerFrameGetId(frame) };
if id > 0 {
Some(Self { id })
} else {
None
}
}
pub fn new_ts(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 0, frame_type)
}
pub fn new_tc(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 1, frame_type)
}
pub fn set_len(&self, flow: *const Flow, dir: i32, len: i64) {
unsafe {
AppLayerFrameSetLengthById(flow, dir, self.id, len);
};
}
pub fn set_tx(&self, flow: *const Flow, dir: i32, tx_id: u64) {
unsafe {
AppLayerFrameSetTxIdById(flow, dir, self.id, tx_id);
};
|
pub fn add_event(&self, flow: *const Flow, dir: i32, event: u8) {
unsafe {
AppLayerFrameAddEventById(flow, dir, self.id, event);
};
}
}
|
}
|
random_line_split
|
frames.rs
|
/* Copyright (C) 2017-2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::applayer::StreamSlice;
use crate::core::Flow;
#[repr(C)]
struct CFrame {
_private: [u8; 0],
}
// Defined in app-layer-register.h
extern {
fn AppLayerFrameNewByRelativeOffset(
flow: *const Flow, stream_slice: *const StreamSlice, frame_start_rel: u32, len: i64,
dir: i32, frame_type: u8,
) -> *const CFrame;
fn AppLayerFrameAddEventById(flow: *const Flow, dir: i32, id: i64, event: u8);
fn AppLayerFrameSetLengthById(flow: *const Flow, dir: i32, id: i64, len: i64);
fn AppLayerFrameSetTxIdById(flow: *const Flow, dir: i32, id: i64, tx_id: u64);
fn AppLayerFrameGetId(frame: *const CFrame) -> i64;
}
pub struct Frame {
pub id: i64,
}
impl std::fmt::Debug for Frame {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "frame: {}", self.id)
}
}
impl Frame {
pub fn new(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
dir: i32, frame_type: u8,
) -> Option<Self> {
let offset = frame_start.as_ptr() as usize - stream_slice.as_slice().as_ptr() as usize;
SCLogDebug!("offset {} stream_slice.len() {} frame_start.len() {}", offset, stream_slice.len(), frame_start.len());
let frame = unsafe {
AppLayerFrameNewByRelativeOffset(
flow,
stream_slice,
offset as u32,
frame_len,
dir,
frame_type,
)
};
let id = unsafe { AppLayerFrameGetId(frame) };
if id > 0 {
Some(Self { id })
} else
|
}
pub fn new_ts(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 0, frame_type)
}
pub fn new_tc(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 1, frame_type)
}
pub fn set_len(&self, flow: *const Flow, dir: i32, len: i64) {
unsafe {
AppLayerFrameSetLengthById(flow, dir, self.id, len);
};
}
pub fn set_tx(&self, flow: *const Flow, dir: i32, tx_id: u64) {
unsafe {
AppLayerFrameSetTxIdById(flow, dir, self.id, tx_id);
};
}
pub fn add_event(&self, flow: *const Flow, dir: i32, event: u8) {
unsafe {
AppLayerFrameAddEventById(flow, dir, self.id, event);
};
}
}
|
{
None
}
|
conditional_block
|
frames.rs
|
/* Copyright (C) 2017-2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::applayer::StreamSlice;
use crate::core::Flow;
#[repr(C)]
struct CFrame {
_private: [u8; 0],
}
// Defined in app-layer-register.h
extern {
fn AppLayerFrameNewByRelativeOffset(
flow: *const Flow, stream_slice: *const StreamSlice, frame_start_rel: u32, len: i64,
dir: i32, frame_type: u8,
) -> *const CFrame;
fn AppLayerFrameAddEventById(flow: *const Flow, dir: i32, id: i64, event: u8);
fn AppLayerFrameSetLengthById(flow: *const Flow, dir: i32, id: i64, len: i64);
fn AppLayerFrameSetTxIdById(flow: *const Flow, dir: i32, id: i64, tx_id: u64);
fn AppLayerFrameGetId(frame: *const CFrame) -> i64;
}
pub struct Frame {
pub id: i64,
}
impl std::fmt::Debug for Frame {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result
|
}
impl Frame {
pub fn new(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
dir: i32, frame_type: u8,
) -> Option<Self> {
let offset = frame_start.as_ptr() as usize - stream_slice.as_slice().as_ptr() as usize;
SCLogDebug!("offset {} stream_slice.len() {} frame_start.len() {}", offset, stream_slice.len(), frame_start.len());
let frame = unsafe {
AppLayerFrameNewByRelativeOffset(
flow,
stream_slice,
offset as u32,
frame_len,
dir,
frame_type,
)
};
let id = unsafe { AppLayerFrameGetId(frame) };
if id > 0 {
Some(Self { id })
} else {
None
}
}
pub fn new_ts(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 0, frame_type)
}
pub fn new_tc(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 1, frame_type)
}
pub fn set_len(&self, flow: *const Flow, dir: i32, len: i64) {
unsafe {
AppLayerFrameSetLengthById(flow, dir, self.id, len);
};
}
pub fn set_tx(&self, flow: *const Flow, dir: i32, tx_id: u64) {
unsafe {
AppLayerFrameSetTxIdById(flow, dir, self.id, tx_id);
};
}
pub fn add_event(&self, flow: *const Flow, dir: i32, event: u8) {
unsafe {
AppLayerFrameAddEventById(flow, dir, self.id, event);
};
}
}
|
{
write!(f, "frame: {}", self.id)
}
|
identifier_body
|
frames.rs
|
/* Copyright (C) 2017-2021 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::applayer::StreamSlice;
use crate::core::Flow;
#[repr(C)]
struct CFrame {
_private: [u8; 0],
}
// Defined in app-layer-register.h
extern {
fn AppLayerFrameNewByRelativeOffset(
flow: *const Flow, stream_slice: *const StreamSlice, frame_start_rel: u32, len: i64,
dir: i32, frame_type: u8,
) -> *const CFrame;
fn AppLayerFrameAddEventById(flow: *const Flow, dir: i32, id: i64, event: u8);
fn AppLayerFrameSetLengthById(flow: *const Flow, dir: i32, id: i64, len: i64);
fn AppLayerFrameSetTxIdById(flow: *const Flow, dir: i32, id: i64, tx_id: u64);
fn AppLayerFrameGetId(frame: *const CFrame) -> i64;
}
pub struct Frame {
pub id: i64,
}
impl std::fmt::Debug for Frame {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "frame: {}", self.id)
}
}
impl Frame {
pub fn new(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
dir: i32, frame_type: u8,
) -> Option<Self> {
let offset = frame_start.as_ptr() as usize - stream_slice.as_slice().as_ptr() as usize;
SCLogDebug!("offset {} stream_slice.len() {} frame_start.len() {}", offset, stream_slice.len(), frame_start.len());
let frame = unsafe {
AppLayerFrameNewByRelativeOffset(
flow,
stream_slice,
offset as u32,
frame_len,
dir,
frame_type,
)
};
let id = unsafe { AppLayerFrameGetId(frame) };
if id > 0 {
Some(Self { id })
} else {
None
}
}
pub fn
|
(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 0, frame_type)
}
pub fn new_tc(
flow: *const Flow, stream_slice: &StreamSlice, frame_start: &[u8], frame_len: i64,
frame_type: u8,
) -> Option<Self> {
Self::new(flow, stream_slice, frame_start, frame_len, 1, frame_type)
}
pub fn set_len(&self, flow: *const Flow, dir: i32, len: i64) {
unsafe {
AppLayerFrameSetLengthById(flow, dir, self.id, len);
};
}
pub fn set_tx(&self, flow: *const Flow, dir: i32, tx_id: u64) {
unsafe {
AppLayerFrameSetTxIdById(flow, dir, self.id, tx_id);
};
}
pub fn add_event(&self, flow: *const Flow, dir: i32, event: u8) {
unsafe {
AppLayerFrameAddEventById(flow, dir, self.id, event);
};
}
}
|
new_ts
|
identifier_name
|
cipher.rs
|
use std::sync::Arc;
use super::error::CipherResult;
use super::{Method, Mode};
use super::methods::BelongLib;
use super::crypto_lib::CryptoCipher;
#[cfg(feature = "openssl")]
use super::openssl_lib::OpensslCipher;
pub struct Cipher {
key: Arc<Vec<u8>>,
iv: Vec<u8>,
inner: Box<StreamCipher +'static>,
}
impl Cipher {
pub fn new(method: Method, mode: Mode, key: Arc<Vec<u8>>, iv: Vec<u8>) -> CipherResult<Cipher> {
let cipher: Box<StreamCipher> = match method.belong_lib() {
BelongLib::Crypto => Box::new(CryptoCipher::new(method, mode, &key, &iv)?),
#[cfg(feature = "openssl")]
BelongLib::Openssl => Box::new(OpensslCipher::new(method, mode, &key, &iv)?),
};
Ok(Cipher {
key: key,
iv: iv,
inner: cipher,
})
}
pub fn key(&self) -> &[u8] {
&self.key
}
pub fn iv(&self) -> &[u8] {
&self.iv
}
pub fn set_iv(&mut self, iv: &[u8]) {
self.iv[..].copy_from_slice(iv);
}
pub fn
|
(&self) -> usize {
self.key.len()
}
pub fn iv_len(&self) -> usize {
self.iv.len()
}
}
impl StreamCipher for Cipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()> {
self.inner.update(input, output)
}
}
pub trait StreamCipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()>;
}
|
key_len
|
identifier_name
|
cipher.rs
|
use std::sync::Arc;
use super::error::CipherResult;
use super::{Method, Mode};
use super::methods::BelongLib;
use super::crypto_lib::CryptoCipher;
#[cfg(feature = "openssl")]
use super::openssl_lib::OpensslCipher;
pub struct Cipher {
key: Arc<Vec<u8>>,
iv: Vec<u8>,
inner: Box<StreamCipher +'static>,
}
impl Cipher {
pub fn new(method: Method, mode: Mode, key: Arc<Vec<u8>>, iv: Vec<u8>) -> CipherResult<Cipher> {
let cipher: Box<StreamCipher> = match method.belong_lib() {
BelongLib::Crypto => Box::new(CryptoCipher::new(method, mode, &key, &iv)?),
#[cfg(feature = "openssl")]
BelongLib::Openssl => Box::new(OpensslCipher::new(method, mode, &key, &iv)?),
};
Ok(Cipher {
key: key,
iv: iv,
inner: cipher,
})
}
pub fn key(&self) -> &[u8] {
&self.key
}
pub fn iv(&self) -> &[u8] {
|
self.iv[..].copy_from_slice(iv);
}
pub fn key_len(&self) -> usize {
self.key.len()
}
pub fn iv_len(&self) -> usize {
self.iv.len()
}
}
impl StreamCipher for Cipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()> {
self.inner.update(input, output)
}
}
pub trait StreamCipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()>;
}
|
&self.iv
}
pub fn set_iv(&mut self, iv: &[u8]) {
|
random_line_split
|
cipher.rs
|
use std::sync::Arc;
use super::error::CipherResult;
use super::{Method, Mode};
use super::methods::BelongLib;
use super::crypto_lib::CryptoCipher;
#[cfg(feature = "openssl")]
use super::openssl_lib::OpensslCipher;
pub struct Cipher {
key: Arc<Vec<u8>>,
iv: Vec<u8>,
inner: Box<StreamCipher +'static>,
}
impl Cipher {
pub fn new(method: Method, mode: Mode, key: Arc<Vec<u8>>, iv: Vec<u8>) -> CipherResult<Cipher> {
let cipher: Box<StreamCipher> = match method.belong_lib() {
BelongLib::Crypto => Box::new(CryptoCipher::new(method, mode, &key, &iv)?),
#[cfg(feature = "openssl")]
BelongLib::Openssl => Box::new(OpensslCipher::new(method, mode, &key, &iv)?),
};
Ok(Cipher {
key: key,
iv: iv,
inner: cipher,
})
}
pub fn key(&self) -> &[u8] {
&self.key
}
pub fn iv(&self) -> &[u8] {
&self.iv
}
pub fn set_iv(&mut self, iv: &[u8]) {
self.iv[..].copy_from_slice(iv);
}
pub fn key_len(&self) -> usize {
self.key.len()
}
pub fn iv_len(&self) -> usize
|
}
impl StreamCipher for Cipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()> {
self.inner.update(input, output)
}
}
pub trait StreamCipher {
fn update(&mut self, input: &[u8], output: &mut Vec<u8>) -> CipherResult<()>;
}
|
{
self.iv.len()
}
|
identifier_body
|
assign_node_ids.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::session::Session;
use syntax::ast;
use syntax::fold::ast_fold;
struct NodeIdAssigner {
sess: Session,
}
impl ast_fold for NodeIdAssigner {
fn new_id(&self, old_id: ast::NodeId) -> ast::NodeId {
assert_eq!(old_id, ast::DUMMY_NODE_ID);
self.sess.next_node_id()
}
}
pub fn assign_node_ids(sess: Session, crate: ast::Crate) -> ast::Crate
|
{
let fold = NodeIdAssigner {
sess: sess,
};
fold.fold_crate(crate)
}
|
identifier_body
|
|
assign_node_ids.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::session::Session;
use syntax::ast;
use syntax::fold::ast_fold;
struct NodeIdAssigner {
sess: Session,
}
impl ast_fold for NodeIdAssigner {
fn new_id(&self, old_id: ast::NodeId) -> ast::NodeId {
assert_eq!(old_id, ast::DUMMY_NODE_ID);
self.sess.next_node_id()
}
}
pub fn
|
(sess: Session, crate: ast::Crate) -> ast::Crate {
let fold = NodeIdAssigner {
sess: sess,
};
fold.fold_crate(crate)
}
|
assign_node_ids
|
identifier_name
|
assign_node_ids.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
use syntax::fold::ast_fold;
struct NodeIdAssigner {
sess: Session,
}
impl ast_fold for NodeIdAssigner {
fn new_id(&self, old_id: ast::NodeId) -> ast::NodeId {
assert_eq!(old_id, ast::DUMMY_NODE_ID);
self.sess.next_node_id()
}
}
pub fn assign_node_ids(sess: Session, crate: ast::Crate) -> ast::Crate {
let fold = NodeIdAssigner {
sess: sess,
};
fold.fold_crate(crate)
}
|
// except according to those terms.
use driver::session::Session;
use syntax::ast;
|
random_line_split
|
user_agent.rs
|
header! {
#[doc="`User-Agent` header, defined in"]
#[doc="[RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.3)"]
#[doc=""]
#[doc="The `User-Agent` header field contains information about the user"]
#[doc="agent originating the request, which is often used by servers to help"]
#[doc="identify the scope of reported interoperability problems, to work"]
#[doc="around or tailor responses to avoid particular user agent"]
#[doc="limitations, and for analytics regarding browser or operating system"]
#[doc="use. A user agent SHOULD send a User-Agent field in each request"]
#[doc="unless specifically configured not to do so."]
#[doc=""]
#[doc="# ABNF"]
#[doc="```plain"]
#[doc="User-Agent = product *( RWS ( product / comment ) )"]
#[doc="product = token [\"/\" product-version]"]
#[doc="product-version = token"]
#[doc="```"]
#[doc=""]
|
#[doc="* The parser does not split the value"]
(UserAgent, "User-Agent") => [String]
test_user_agent {
// Testcase from RFC
test_header!(test1, vec![b"CERN-LineMode/2.15 libwww/2.17b3"]);
// Own testcase
test_header!(test2, vec![b"Bunnies"], Some(UserAgent("Bunnies".to_string())));
}
}
|
#[doc="# Example values"]
#[doc="* `CERN-LineMode/2.15 libwww/2.17b3`"]
#[doc="* `Bunnies`"]
#[doc=""]
#[doc="# Notes"]
|
random_line_split
|
channel_mpsc.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::io;
use std::io::{Error, ErrorKind};
use std::sync::mpsc;
///
/// Handles the channel implementation when in process channels are enabled.
///
pub type PayloadSender = MsgSender<Payload>;
pub type PayloadReceiver = MsgReceiver<Payload>;
impl PayloadSenderHelperMethods for PayloadSender {
fn send_payload(&self, payload: Payload) -> Result<(), Error> {
self.send(payload)
}
}
impl PayloadReceiverHelperMethods for PayloadReceiver {
fn recv_payload(&self) -> Result<Payload, Error> {
self.recv()
}
fn to_mpsc_receiver(self) -> Receiver<Payload> {
self.rx
}
}
pub struct MsgReceiver<T> {
rx: mpsc::Receiver<T>,
}
impl<T> MsgReceiver<T> {
pub fn recv(&self) -> Result<T, Error> {
use std::error::Error;
self.rx.recv().map_err(|e| io::Error::new(ErrorKind::Other, e.description()))
}
}
#[derive(Clone)]
pub struct MsgSender<T> {
tx: mpsc::Sender<T>,
}
impl<T> MsgSender<T> {
pub fn send(&self, data: T) -> Result<(), Error> {
self.tx.send(data).map_err(|_| Error::new(ErrorKind::Other, "cannot send on closed channel"))
}
}
pub fn payload_channel() -> Result<(PayloadSender, PayloadReceiver), Error> {
let (tx, rx) = mpsc::channel();
Ok((PayloadSender { tx }, PayloadReceiver { rx }))
}
pub fn msg_channel<T>() -> Result<(MsgSender<T>, MsgReceiver<T>), Error> {
let (tx, rx) = mpsc::channel();
Ok((MsgSender { tx }, MsgReceiver { rx }))
}
///
/// These serialize methods are needed to satisfy the compiler
/// which uses these implementations for IPC, and also for the
/// recording tool. The recording tool only outputs messages
/// that don't contain Senders or Receivers, so in theory
/// these should never be called in the in-process config.
/// If they are called, there may be a bug in the messages
/// that the replay tool is writing.
///
impl<T> Serialize for MsgReceiver<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<T> Serialize for MsgSender<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<'de, T> Deserialize<'de> for MsgReceiver<T> {
fn deserialize<D>(_: D) -> Result<MsgReceiver<T>, D::Error>
where D: Deserializer<'de>
|
}
impl<'de, T> Deserialize<'de> for MsgSender<T> {
fn deserialize<D>(_: D) -> Result<MsgSender<T>, D::Error>
where D: Deserializer<'de> {
unreachable!();
}
}
|
{
unreachable!();
}
|
identifier_body
|
channel_mpsc.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
use std::io;
use std::io::{Error, ErrorKind};
use std::sync::mpsc;
///
/// Handles the channel implementation when in process channels are enabled.
///
pub type PayloadSender = MsgSender<Payload>;
pub type PayloadReceiver = MsgReceiver<Payload>;
impl PayloadSenderHelperMethods for PayloadSender {
fn send_payload(&self, payload: Payload) -> Result<(), Error> {
self.send(payload)
}
}
impl PayloadReceiverHelperMethods for PayloadReceiver {
fn recv_payload(&self) -> Result<Payload, Error> {
self.recv()
}
fn to_mpsc_receiver(self) -> Receiver<Payload> {
self.rx
}
}
pub struct MsgReceiver<T> {
rx: mpsc::Receiver<T>,
}
impl<T> MsgReceiver<T> {
pub fn recv(&self) -> Result<T, Error> {
use std::error::Error;
self.rx.recv().map_err(|e| io::Error::new(ErrorKind::Other, e.description()))
}
}
#[derive(Clone)]
pub struct MsgSender<T> {
tx: mpsc::Sender<T>,
}
impl<T> MsgSender<T> {
pub fn send(&self, data: T) -> Result<(), Error> {
self.tx.send(data).map_err(|_| Error::new(ErrorKind::Other, "cannot send on closed channel"))
}
}
pub fn payload_channel() -> Result<(PayloadSender, PayloadReceiver), Error> {
let (tx, rx) = mpsc::channel();
Ok((PayloadSender { tx }, PayloadReceiver { rx }))
}
pub fn msg_channel<T>() -> Result<(MsgSender<T>, MsgReceiver<T>), Error> {
let (tx, rx) = mpsc::channel();
Ok((MsgSender { tx }, MsgReceiver { rx }))
}
///
/// These serialize methods are needed to satisfy the compiler
/// which uses these implementations for IPC, and also for the
/// recording tool. The recording tool only outputs messages
/// that don't contain Senders or Receivers, so in theory
/// these should never be called in the in-process config.
/// If they are called, there may be a bug in the messages
/// that the replay tool is writing.
///
impl<T> Serialize for MsgReceiver<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<T> Serialize for MsgSender<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<'de, T> Deserialize<'de> for MsgReceiver<T> {
fn deserialize<D>(_: D) -> Result<MsgReceiver<T>, D::Error>
where D: Deserializer<'de> {
unreachable!();
}
}
impl<'de, T> Deserialize<'de> for MsgSender<T> {
fn deserialize<D>(_: D) -> Result<MsgSender<T>, D::Error>
where D: Deserializer<'de> {
unreachable!();
}
}
|
random_line_split
|
|
channel_mpsc.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::io;
use std::io::{Error, ErrorKind};
use std::sync::mpsc;
///
/// Handles the channel implementation when in process channels are enabled.
///
pub type PayloadSender = MsgSender<Payload>;
pub type PayloadReceiver = MsgReceiver<Payload>;
impl PayloadSenderHelperMethods for PayloadSender {
fn send_payload(&self, payload: Payload) -> Result<(), Error> {
self.send(payload)
}
}
impl PayloadReceiverHelperMethods for PayloadReceiver {
fn recv_payload(&self) -> Result<Payload, Error> {
self.recv()
}
fn to_mpsc_receiver(self) -> Receiver<Payload> {
self.rx
}
}
pub struct MsgReceiver<T> {
rx: mpsc::Receiver<T>,
}
impl<T> MsgReceiver<T> {
pub fn recv(&self) -> Result<T, Error> {
use std::error::Error;
self.rx.recv().map_err(|e| io::Error::new(ErrorKind::Other, e.description()))
}
}
#[derive(Clone)]
pub struct MsgSender<T> {
tx: mpsc::Sender<T>,
}
impl<T> MsgSender<T> {
pub fn send(&self, data: T) -> Result<(), Error> {
self.tx.send(data).map_err(|_| Error::new(ErrorKind::Other, "cannot send on closed channel"))
}
}
pub fn
|
() -> Result<(PayloadSender, PayloadReceiver), Error> {
let (tx, rx) = mpsc::channel();
Ok((PayloadSender { tx }, PayloadReceiver { rx }))
}
pub fn msg_channel<T>() -> Result<(MsgSender<T>, MsgReceiver<T>), Error> {
let (tx, rx) = mpsc::channel();
Ok((MsgSender { tx }, MsgReceiver { rx }))
}
///
/// These serialize methods are needed to satisfy the compiler
/// which uses these implementations for IPC, and also for the
/// recording tool. The recording tool only outputs messages
/// that don't contain Senders or Receivers, so in theory
/// these should never be called in the in-process config.
/// If they are called, there may be a bug in the messages
/// that the replay tool is writing.
///
impl<T> Serialize for MsgReceiver<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<T> Serialize for MsgSender<T> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<'de, T> Deserialize<'de> for MsgReceiver<T> {
fn deserialize<D>(_: D) -> Result<MsgReceiver<T>, D::Error>
where D: Deserializer<'de> {
unreachable!();
}
}
impl<'de, T> Deserialize<'de> for MsgSender<T> {
fn deserialize<D>(_: D) -> Result<MsgSender<T>, D::Error>
where D: Deserializer<'de> {
unreachable!();
}
}
|
payload_channel
|
identifier_name
|
error.rs
|
use rustc_serialize::json::DecoderError;
use rustc_serialize::base64::FromBase64Error;
|
pub enum VaultError {
/// If the vault is corrupted, we may not be able to read the base64 encoded data
Base64Error(FromBase64Error),
/// This happens when the data in the vault is valid, but does not match the vault type
VaultEntrySchemaError(DecoderError),
/// When the decrypted data is not valid JSON
BadPasswordError,
/// When you attempt to encrypt a Vault that has no password set
NoPasswordSpecifiedError
}
/// Convenience type for VaultError functions
pub type VResult<T> = Result<T, VaultError>;
impl From<DecoderError> for VaultError {
fn from(e: DecoderError) -> VaultError {
match e {
DecoderError::ParseError(_) => BadPasswordError,
e => VaultEntrySchemaError(e)
}
}
}
impl From<FromBase64Error> for VaultError {
fn from(e: FromBase64Error) -> VaultError {
Base64Error(e)
}
}
|
use self::VaultError::*;
/// Various errors for vault operations
#[derive(Debug)]
|
random_line_split
|
error.rs
|
use rustc_serialize::json::DecoderError;
use rustc_serialize::base64::FromBase64Error;
use self::VaultError::*;
/// Various errors for vault operations
#[derive(Debug)]
pub enum VaultError {
/// If the vault is corrupted, we may not be able to read the base64 encoded data
Base64Error(FromBase64Error),
/// This happens when the data in the vault is valid, but does not match the vault type
VaultEntrySchemaError(DecoderError),
/// When the decrypted data is not valid JSON
BadPasswordError,
/// When you attempt to encrypt a Vault that has no password set
NoPasswordSpecifiedError
}
/// Convenience type for VaultError functions
pub type VResult<T> = Result<T, VaultError>;
impl From<DecoderError> for VaultError {
fn from(e: DecoderError) -> VaultError {
match e {
DecoderError::ParseError(_) => BadPasswordError,
e => VaultEntrySchemaError(e)
}
}
}
impl From<FromBase64Error> for VaultError {
fn
|
(e: FromBase64Error) -> VaultError {
Base64Error(e)
}
}
|
from
|
identifier_name
|
version.rs
|
//! HTTP Versions enum
//!
//! Instead of relying on typo-prone Strings, use expected HTTP versions as
//! the `HttpVersion` enum.
use std::fmt;
use std::str::FromStr;
use error::Error;
use self::HttpVersion::{Http09, Http10, Http11, H2, H2c};
/// Represents a version of the HTTP spec.
#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)]
pub enum HttpVersion {
/// `HTTP/0.9`
Http09,
/// `HTTP/1.0`
Http10,
/// `HTTP/1.1`
Http11,
/// `HTTP/2.0` over TLS
H2,
/// `HTTP/2.0` over cleartext
H2c,
#[doc(hidden)]
__DontMatchMe,
}
impl fmt::Display for HttpVersion {
|
Http11 => "HTTP/1.1",
H2 => "h2",
H2c => "h2c",
HttpVersion::__DontMatchMe => unreachable!(),
})
}
}
impl FromStr for HttpVersion {
type Err = Error;
fn from_str(s: &str) -> Result<HttpVersion, Error> {
Ok(match s {
"HTTP/0.9" => Http09,
"HTTP/1.0" => Http10,
"HTTP/1.1" => Http11,
"h2" => H2,
"h2c" => H2c,
_ => return Err(Error::Version),
})
}
}
impl Default for HttpVersion {
fn default() -> HttpVersion {
Http11
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use error::Error;
use super::HttpVersion;
use super::HttpVersion::{Http09,Http10,Http11,H2,H2c};
#[test]
fn test_default() {
assert_eq!(Http11, HttpVersion::default());
}
#[test]
fn test_from_str() {
assert_eq!(Http09, HttpVersion::from_str("HTTP/0.9").unwrap());
assert_eq!(Http10, HttpVersion::from_str("HTTP/1.0").unwrap());
assert_eq!(Http11, HttpVersion::from_str("HTTP/1.1").unwrap());
assert_eq!(H2, HttpVersion::from_str("h2").unwrap());
assert_eq!(H2c, HttpVersion::from_str("h2c").unwrap());
}
#[test]
fn test_from_str_panic() {
match HttpVersion::from_str("foo") {
Err(Error::Version) => assert!(true),
Err(_) => assert!(false),
Ok(_) => assert!(false),
}
}
}
|
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
Http09 => "HTTP/0.9",
Http10 => "HTTP/1.0",
|
random_line_split
|
version.rs
|
//! HTTP Versions enum
//!
//! Instead of relying on typo-prone Strings, use expected HTTP versions as
//! the `HttpVersion` enum.
use std::fmt;
use std::str::FromStr;
use error::Error;
use self::HttpVersion::{Http09, Http10, Http11, H2, H2c};
/// Represents a version of the HTTP spec.
#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)]
pub enum HttpVersion {
/// `HTTP/0.9`
Http09,
/// `HTTP/1.0`
Http10,
/// `HTTP/1.1`
Http11,
/// `HTTP/2.0` over TLS
H2,
/// `HTTP/2.0` over cleartext
H2c,
#[doc(hidden)]
__DontMatchMe,
}
impl fmt::Display for HttpVersion {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
Http09 => "HTTP/0.9",
Http10 => "HTTP/1.0",
Http11 => "HTTP/1.1",
H2 => "h2",
H2c => "h2c",
HttpVersion::__DontMatchMe => unreachable!(),
})
}
}
impl FromStr for HttpVersion {
type Err = Error;
fn
|
(s: &str) -> Result<HttpVersion, Error> {
Ok(match s {
"HTTP/0.9" => Http09,
"HTTP/1.0" => Http10,
"HTTP/1.1" => Http11,
"h2" => H2,
"h2c" => H2c,
_ => return Err(Error::Version),
})
}
}
impl Default for HttpVersion {
fn default() -> HttpVersion {
Http11
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use error::Error;
use super::HttpVersion;
use super::HttpVersion::{Http09,Http10,Http11,H2,H2c};
#[test]
fn test_default() {
assert_eq!(Http11, HttpVersion::default());
}
#[test]
fn test_from_str() {
assert_eq!(Http09, HttpVersion::from_str("HTTP/0.9").unwrap());
assert_eq!(Http10, HttpVersion::from_str("HTTP/1.0").unwrap());
assert_eq!(Http11, HttpVersion::from_str("HTTP/1.1").unwrap());
assert_eq!(H2, HttpVersion::from_str("h2").unwrap());
assert_eq!(H2c, HttpVersion::from_str("h2c").unwrap());
}
#[test]
fn test_from_str_panic() {
match HttpVersion::from_str("foo") {
Err(Error::Version) => assert!(true),
Err(_) => assert!(false),
Ok(_) => assert!(false),
}
}
}
|
from_str
|
identifier_name
|
version.rs
|
//! HTTP Versions enum
//!
//! Instead of relying on typo-prone Strings, use expected HTTP versions as
//! the `HttpVersion` enum.
use std::fmt;
use std::str::FromStr;
use error::Error;
use self::HttpVersion::{Http09, Http10, Http11, H2, H2c};
/// Represents a version of the HTTP spec.
#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)]
pub enum HttpVersion {
/// `HTTP/0.9`
Http09,
/// `HTTP/1.0`
Http10,
/// `HTTP/1.1`
Http11,
/// `HTTP/2.0` over TLS
H2,
/// `HTTP/2.0` over cleartext
H2c,
#[doc(hidden)]
__DontMatchMe,
}
impl fmt::Display for HttpVersion {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(match *self {
Http09 => "HTTP/0.9",
Http10 => "HTTP/1.0",
Http11 => "HTTP/1.1",
H2 => "h2",
H2c => "h2c",
HttpVersion::__DontMatchMe => unreachable!(),
})
}
}
impl FromStr for HttpVersion {
type Err = Error;
fn from_str(s: &str) -> Result<HttpVersion, Error> {
Ok(match s {
"HTTP/0.9" => Http09,
"HTTP/1.0" => Http10,
"HTTP/1.1" => Http11,
"h2" => H2,
"h2c" => H2c,
_ => return Err(Error::Version),
})
}
}
impl Default for HttpVersion {
fn default() -> HttpVersion {
Http11
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use error::Error;
use super::HttpVersion;
use super::HttpVersion::{Http09,Http10,Http11,H2,H2c};
#[test]
fn test_default() {
assert_eq!(Http11, HttpVersion::default());
}
#[test]
fn test_from_str() {
assert_eq!(Http09, HttpVersion::from_str("HTTP/0.9").unwrap());
assert_eq!(Http10, HttpVersion::from_str("HTTP/1.0").unwrap());
assert_eq!(Http11, HttpVersion::from_str("HTTP/1.1").unwrap());
assert_eq!(H2, HttpVersion::from_str("h2").unwrap());
assert_eq!(H2c, HttpVersion::from_str("h2c").unwrap());
}
#[test]
fn test_from_str_panic()
|
}
|
{
match HttpVersion::from_str("foo") {
Err(Error::Version) => assert!(true),
Err(_) => assert!(false),
Ok(_) => assert!(false),
}
}
|
identifier_body
|
pp.rs
|
algorithm decides to break
//! there anyways (because the functions themselves are long) you wind up with
//! extra blank lines. If you don't put hardbreaks you can wind up with the
//! "thing which should be on its own line" not getting its own line in the
//! rare case of "really small functions" or such. This re-occurs with comments
//! and explicit blank lines. So in those cases we use a string with a payload
//! we want isolated to a line and an explicit length that's huge, surrounded
//! by two zero-length breaks. The algorithm will try its best to fit it on a
//! line (which it can't) and so naturally place the content on its own line to
//! avoid combining it with other lines and making matters even worse.
use std::old_io;
use std::string;
use std::iter::repeat;
#[derive(Clone, Copy, PartialEq)]
pub enum Breaks {
Consistent,
Inconsistent,
}
#[derive(Clone, Copy)]
pub struct BreakToken {
offset: isize,
blank_space: isize
}
#[derive(Clone, Copy)]
pub struct BeginToken {
offset: isize,
breaks: Breaks
}
#[derive(Clone)]
pub enum Token {
String(String, isize),
Break(BreakToken),
Begin(BeginToken),
End,
Eof,
}
impl Token {
pub fn is_eof(&self) -> bool {
match *self {
Token::Eof => true,
_ => false,
}
}
pub fn is_hardbreak_tok(&self) -> bool {
match *self {
Token::Break(BreakToken {
offset: 0,
blank_space: bs
}) if bs == SIZE_INFINITY =>
true,
_ =>
false
}
}
}
pub fn tok_str(token: &Token) -> String {
match *token {
Token::String(ref s, len) => format!("STR({},{})", s, len),
Token::Break(_) => "BREAK".to_string(),
Token::Begin(_) => "BEGIN".to_string(),
Token::End => "END".to_string(),
Token::Eof => "EOF".to_string()
}
}
pub fn buf_str(toks: &[Token],
szs: &[isize],
left: usize,
right: usize,
lim: usize)
-> String {
let n = toks.len();
assert_eq!(n, szs.len());
let mut i = left;
let mut l = lim;
let mut s = string::String::from_str("[");
while i!= right && l!= 0 {
l -= 1;
if i!= left {
s.push_str(", ");
}
s.push_str(&format!("{}={}",
szs[i],
tok_str(&toks[i]))[]);
i += 1;
i %= n;
}
s.push(']');
s
}
#[derive(Copy)]
pub enum PrintStackBreak {
Fits,
Broken(Breaks),
}
#[derive(Copy)]
pub struct PrintStackElem {
offset: isize,
pbreak: PrintStackBreak
}
static SIZE_INFINITY: isize = 0xffff;
pub fn mk_printer(out: Box<old_io::Writer+'static>, linewidth: usize) -> Printer {
// Yes 3, it makes the ring buffers big enough to never
// fall behind.
let n: usize = 3 * linewidth;
debug!("mk_printer {}", linewidth);
let token: Vec<Token> = repeat(Token::Eof).take(n).collect();
let size: Vec<isize> = repeat(0).take(n).collect();
let scan_stack: Vec<usize> = repeat(0us).take(n).collect();
Printer {
out: out,
buf_len: n,
margin: linewidth as isize,
space: linewidth as isize,
left: 0,
right: 0,
token: token,
size: size,
left_total: 0,
right_total: 0,
scan_stack: scan_stack,
scan_stack_empty: true,
top: 0,
bottom: 0,
print_stack: Vec::new(),
pending_indentation: 0
}
}
/// In case you do not have the paper, here is an explanation of what's going
/// on.
///
/// There is a stream of input tokens flowing through this printer.
///
/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
/// Yes, linewidth is chars and tokens are multi-char, but in the worst
/// case every token worth buffering is 1 char long, so it's ok.
///
/// Tokens are String, Break, and Begin/End to delimit blocks.
///
/// Begin tokens can carry an offset, saying "how far to indent when you break
/// inside here", as well as a flag indicating "consistent" or "inconsistent"
/// breaking. Consistent breaking means that after the first break, no attempt
/// will be made to flow subsequent breaks together onto lines. Inconsistent
/// is the opposite. Inconsistent breaking example would be, say:
///
/// foo(hello, there, good, friends)
///
/// breaking inconsistently to become
///
/// foo(hello, there
/// good, friends);
///
/// whereas a consistent breaking would yield:
///
/// foo(hello,
/// there
/// good,
/// friends);
///
/// That is, in the consistent-break blocks we value vertical alignment
/// more than the ability to cram stuff onto a line. But in all cases if it
/// can make a block a one-liner, it'll do so.
///
/// Carrying on with high-level logic:
///
/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
/// 'right' indices denote the active portion of the ring buffer as well as
/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
/// and point-in-infinite-stream senses freely.
///
/// There is a parallel ring buffer,'size', that holds the calculated size of
/// each token. Why calculated? Because for Begin/End pairs, the "size"
/// includes everything between the pair. That is, the "size" of Begin is
/// actually the sum of the sizes of everything between Begin and the paired
/// End that follows. Since that is arbitrarily far in the future,'size' is
/// being rewritten regularly while the printer runs; in fact most of the
/// machinery is here to work out'size' entries on the fly (and give up when
/// they're so obviously over-long that "infinity" is a good enough
/// approximation for purposes of line breaking).
///
/// The "input side" of the printer is managed as an abstract process called
/// SCAN, which uses'scan_stack','scan_stack_empty', 'top' and 'bottom', to
/// manage calculating'size'. SCAN is, in other words, the process of
/// calculating'size' entries.
///
/// The "output side" of the printer is managed by an abstract process called
/// PRINT, which uses 'print_stack','margin' and'space' to figure out what to
/// do with each token/size pair it consumes as it goes. It's trying to consume
/// the entire buffered window, but can't output anything until the size is >=
/// 0 (sizes are set to negative while they're pending calculation).
///
/// So SCAN takes input and buffers tokens and pending calculations, while
/// PRINT gobbles up completed calculations and tokens from the buffer. The
/// theory is that the two can never get more than 3N tokens apart, because
/// once there's "obviously" too much data to fit on a line, in a size
/// calculation, SCAN will write "infinity" to the size and let PRINT consume
/// it.
///
/// In this implementation (following the paper, again) the SCAN process is
/// the method called 'pretty_print', and the 'PRINT' process is the method
/// called 'print'.
pub struct Printer {
pub out: Box<old_io::Writer+'static>,
buf_len: usize,
/// Width of lines we're constrained to
margin: isize,
/// Number of spaces left on line
space: isize,
/// Index of left side of input stream
left: usize,
/// Index of right side of input stream
right: usize,
/// Ring-buffer stream goes through
token: Vec<Token>,
/// Ring-buffer of calculated sizes
size: Vec<isize>,
/// Running size of stream "...left"
left_total: isize,
/// Running size of stream "...right"
right_total: isize,
/// Pseudo-stack, really a ring too. Holds the
/// primary-ring-buffers index of the Begin that started the
/// current block, possibly with the most recent Break after that
/// Begin (if there is any) on top of it. Stuff is flushed off the
/// bottom as it becomes irrelevant due to the primary ring-buffer
/// advancing.
scan_stack: Vec<usize>,
/// Top==bottom disambiguator
scan_stack_empty: bool,
/// Index of top of scan_stack
top: usize,
/// Index of bottom of scan_stack
bottom: usize,
/// Stack of blocks-in-progress being flushed by print
print_stack: Vec<PrintStackElem>,
/// Buffered indentation to avoid writing trailing whitespace
pending_indentation: isize,
}
impl Printer {
pub fn last_token(&mut self) -> Token {
self.token[self.right].clone()
}
// be very careful with this!
pub fn
|
(&mut self, t: Token) {
self.token[self.right] = t;
}
pub fn pretty_print(&mut self, token: Token) -> old_io::IoResult<()> {
debug!("pp ~[{},{}]", self.left, self.right);
match token {
Token::Eof => {
if!self.scan_stack_empty {
self.check_stack(0);
try!(self.advance_left());
}
self.indent(0);
Ok(())
}
Token::Begin(b) => {
if self.scan_stack_empty {
self.left_total = 1;
self.right_total = 1;
self.left = 0;
self.right = 0;
} else { self.advance_right(); }
debug!("pp Begin({})/buffer ~[{},{}]",
b.offset, self.left, self.right);
self.token[self.right] = token;
self.size[self.right] = -self.right_total;
let right = self.right;
self.scan_push(right);
Ok(())
}
Token::End => {
if self.scan_stack_empty {
debug!("pp End/print ~[{},{}]", self.left, self.right);
self.print(token, 0)
} else {
debug!("pp End/buffer ~[{},{}]", self.left, self.right);
self.advance_right();
self.token[self.right] = token;
self.size[self.right] = -1;
let right = self.right;
self.scan_push(right);
Ok(())
}
}
Token::Break(b) => {
if self.scan_stack_empty {
self.left_total = 1;
self.right_total = 1;
self.left = 0;
self.right = 0;
} else { self.advance_right(); }
debug!("pp Break({})/buffer ~[{},{}]",
b.offset, self.left, self.right);
self.check_stack(0);
let right = self.right;
self.scan_push(right);
self.token[self.right] = token;
self.size[self.right] = -self.right_total;
self.right_total += b.blank_space;
Ok(())
}
Token::String(s, len) => {
if self.scan_stack_empty {
debug!("pp String('{}')/print ~[{},{}]",
s, self.left, self.right);
self.print(Token::String(s, len), len)
} else {
debug!("pp String('{}')/buffer ~[{},{}]",
s, self.left, self.right);
self.advance_right();
self.token[self.right] = Token::String(s, len);
self.size[self.right] = len;
self.right_total += len;
self.check_stream()
}
}
}
}
pub fn check_stream(&mut self) -> old_io::IoResult<()> {
debug!("check_stream ~[{}, {}] with left_total={}, right_total={}",
self.left, self.right, self.left_total, self.right_total);
if self.right_total - self.left_total > self.space {
debug!("scan window is {}, longer than space on line ({})",
self.right_total - self.left_total, self.space);
if!self.scan_stack_empty {
if self.left == self.scan_stack[self.bottom] {
debug!("setting {} to infinity and popping", self.left);
let scanned = self.scan_pop_bottom();
self.size[scanned] = SIZE_INFINITY;
}
}
try!(self.advance_left());
if self.left!= self.right {
try!(self.check_stream());
}
}
Ok(())
}
pub fn scan_push(&mut self, x: usize) {
debug!("scan_push {}", x);
if self.scan_stack_empty {
self.scan_stack_empty = false;
} else {
self.top += 1;
self.top %= self.buf_len;
assert!((self.top!= self.bottom));
}
self.scan_stack[self.top] = x;
}
pub fn scan_pop(&mut self) -> usize {
assert!((!self.scan_stack_empty));
let x = self.scan_stack[self.top];
if self.top == self.bottom {
self.scan_stack_empty = true;
} else {
self.top += self.buf_len - 1; self.top %= self.buf_len;
}
return x;
}
pub fn scan_top(&mut self) -> usize {
assert!((!self.scan_stack_empty));
return self.scan_stack[self.top];
}
pub fn scan_pop_bottom(&mut self) -> usize {
assert!((!self.scan_stack_empty));
let x = self.scan_stack[self.bottom];
if self.top == self.bottom {
self.scan_stack_empty = true;
} else {
self.bottom += 1; self.bottom %= self.buf_len;
}
return x;
}
pub fn advance_right(&mut self) {
self.right += 1;
self.right %= self.buf_len;
assert!((self.right!= self.left));
}
pub fn advance_left(&mut self) -> old_io::IoResult<()> {
debug!("advance_left ~[{},{}], sizeof({})={}", self.left, self.right,
self.left, self.size[self.left]);
let mut left_size = self.size[self.left];
while left_size >= 0 {
let left = self.token[self.left].clone();
let len = match left {
Token::Break(b) => b.blank_space,
Token::String(_, len) => {
assert_eq!(len, left_size);
len
}
_ => 0
};
try!(self.print(left, left_size));
self.left_total += len;
if self.left == self.right {
break;
}
self.left += 1;
self.left %= self.buf_len;
left_size = self.size[self.left];
}
Ok(())
}
pub fn check_stack(&mut self, k: isize) {
if!self.scan_stack_empty {
let x = self.scan_top();
match self.token[x] {
Token::Begin(_) => {
if k > 0 {
let popped = self.scan_pop();
self.size[popped] = self.size[x] + self.right_total;
self.check_stack(k - 1);
}
}
Token::End => {
// paper says + not =, but that makes no sense.
let popped = self.scan_pop();
self.size[popped] = 1;
self.check_stack(k + 1);
}
_ => {
let popped = self.scan_pop();
self.size[popped] = self.size[x] + self.right_total;
if k > 0 {
self.check_stack(k);
}
}
}
}
}
pub fn print_newline(&mut self, amount: isize) -> old_io::IoResult<()> {
debug!("NEWLINE {}", amount);
let ret = write!(self.out, "\n");
self.pending_indentation = 0;
self.indent(amount);
return ret;
}
pub fn indent(&mut self, amount: isize) {
debug!("INDENT {}", amount);
self.pending_indentation += amount;
}
pub fn get_top(&mut self) -> PrintStackElem {
let print_stack = &mut self.print_stack;
let n = print_stack.len();
if n!= 0 {
(*print_stack)[n - 1]
} else {
PrintStackElem {
offset: 0,
pbreak: PrintStackBreak::Broken(Breaks::Inconsistent)
}
}
}
pub fn print_str(&mut self, s: &str) -> old_io::IoResult<()> {
while self.pending_indentation > 0 {
try!(write!(self.out, " "));
self.pending_indentation -= 1;
}
write!(self.out, "{}", s)
}
pub fn print(&mut self, token: Token, l: isize) -> old_io::IoResult<()> {
debug!("print {} {} (remaining line space={})", tok_str(&token), l,
self.space);
debug!("{}", buf_str(&self.token[],
&self.size[],
self.left,
self.right,
6));
match token {
Token::Begin(b) => {
if l > self.space {
let col = self.margin - self.space + b.offset;
debug!("print Begin -> push broken block at col {}", col);
self.print_stack.push(PrintStackElem {
offset: col,
pbreak: PrintStackBreak::Broken(b.breaks)
});
} else {
debug!("print Begin -> push fitting block");
self.print_stack.push(PrintStackElem {
offset: 0,
pbreak: PrintStackBreak::Fits
});
}
Ok(())
}
Token::End => {
debug!("print End -> pop End");
let print_stack = &mut self.print_stack;
assert!((print_stack.len()!= 0));
print_stack.pop().unwrap();
Ok(())
}
Token::Break(b) => {
let top = self.get_top();
match top.pbreak {
PrintStackBreak::Fits => {
debug!("print Break({}) in fitting block", b.blank_space);
|
replace_last_token
|
identifier_name
|
pp.rs
|
pending_indentation: 0
}
}
/// In case you do not have the paper, here is an explanation of what's going
/// on.
///
/// There is a stream of input tokens flowing through this printer.
///
/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
/// Yes, linewidth is chars and tokens are multi-char, but in the worst
/// case every token worth buffering is 1 char long, so it's ok.
///
/// Tokens are String, Break, and Begin/End to delimit blocks.
///
/// Begin tokens can carry an offset, saying "how far to indent when you break
/// inside here", as well as a flag indicating "consistent" or "inconsistent"
/// breaking. Consistent breaking means that after the first break, no attempt
/// will be made to flow subsequent breaks together onto lines. Inconsistent
/// is the opposite. Inconsistent breaking example would be, say:
///
/// foo(hello, there, good, friends)
///
/// breaking inconsistently to become
///
/// foo(hello, there
/// good, friends);
///
/// whereas a consistent breaking would yield:
///
/// foo(hello,
/// there
/// good,
/// friends);
///
/// That is, in the consistent-break blocks we value vertical alignment
/// more than the ability to cram stuff onto a line. But in all cases if it
/// can make a block a one-liner, it'll do so.
///
/// Carrying on with high-level logic:
///
/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
/// 'right' indices denote the active portion of the ring buffer as well as
/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
/// and point-in-infinite-stream senses freely.
///
/// There is a parallel ring buffer, 'size', that holds the calculated size of
/// each token. Why calculated? Because for Begin/End pairs, the "size"
/// includes everything between the pair. That is, the "size" of Begin is
/// actually the sum of the sizes of everything between Begin and the paired
/// End that follows. Since that is arbitrarily far in the future, 'size' is
/// being rewritten regularly while the printer runs; in fact most of the
/// machinery is here to work out 'size' entries on the fly (and give up when
/// they're so obviously over-long that "infinity" is a good enough
/// approximation for purposes of line breaking).
///
/// The "input side" of the printer is managed as an abstract process called
/// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
/// manage calculating 'size'. SCAN is, in other words, the process of
/// calculating 'size' entries.
///
/// The "output side" of the printer is managed by an abstract process called
/// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
/// do with each token/size pair it consumes as it goes. It's trying to consume
/// the entire buffered window, but can't output anything until the size is >=
/// 0 (sizes are set to negative while they're pending calculation).
///
/// So SCAN takes input and buffers tokens and pending calculations, while
/// PRINT gobbles up completed calculations and tokens from the buffer. The
/// theory is that the two can never get more than 3N tokens apart, because
/// once there's "obviously" too much data to fit on a line, in a size
/// calculation, SCAN will write "infinity" to the size and let PRINT consume
/// it.
///
/// In this implementation (following the paper, again) the SCAN process is
/// the method called 'pretty_print', and the 'PRINT' process is the method
/// called 'print'.
/// The pretty-printer state; implements the SCAN/PRINT model described
/// in the explanation above.
pub struct Printer {
    pub out: Box<old_io::Writer+'static>,
    /// Capacity of each ring buffer (3 * linewidth; see mk_printer)
    buf_len: usize,
    /// Width of lines we're constrained to
    margin: isize,
    /// Number of spaces left on line
    space: isize,
    /// Index of left side of input stream
    left: usize,
    /// Index of right side of input stream
    right: usize,
    /// Ring-buffer stream goes through
    token: Vec<Token>,
    /// Ring-buffer of calculated sizes (negative while still pending)
    size: Vec<isize>,
    /// Running size of stream "...left"
    left_total: isize,
    /// Running size of stream "...right"
    right_total: isize,
    /// Pseudo-stack, really a ring too. Holds the
    /// primary-ring-buffers index of the Begin that started the
    /// current block, possibly with the most recent Break after that
    /// Begin (if there is any) on top of it. Stuff is flushed off the
    /// bottom as it becomes irrelevant due to the primary ring-buffer
    /// advancing.
    scan_stack: Vec<usize>,
    /// Top==bottom disambiguator
    scan_stack_empty: bool,
    /// Index of top of scan_stack
    top: usize,
    /// Index of bottom of scan_stack
    bottom: usize,
    /// Stack of blocks-in-progress being flushed by print
    print_stack: Vec<PrintStackElem>,
    /// Buffered indentation to avoid writing trailing whitespace
    pending_indentation: isize,
}
impl Printer {
/// Returns a clone of the most recently buffered token.
pub fn last_token(&mut self) -> Token {
    self.token[self.right].clone()
}
// be very careful with this! Overwrites the most recently buffered
// token in place, with no size or scan-stack bookkeeping.
pub fn replace_last_token(&mut self, t: Token) {
    self.token[self.right] = t;
}
/// SCAN entry point: feeds one `token` into the printer. Tokens are
/// buffered into the ring with pending (negative) sizes until enough
/// of the stream is known, then handed to PRINT via advance_left /
/// check_stream.
pub fn pretty_print(&mut self, token: Token) -> old_io::IoResult<()> {
    debug!("pp ~[{},{}]", self.left, self.right);
    match token {
        Token::Eof => {
            // End of input: resolve whatever sizes remain and flush.
            if!self.scan_stack_empty {
                self.check_stack(0);
                try!(self.advance_left());
            }
            self.indent(0);
            Ok(())
        }
        Token::Begin(b) => {
            if self.scan_stack_empty {
                // First buffered token: reset the window.
                self.left_total = 1;
                self.right_total = 1;
                self.left = 0;
                self.right = 0;
            } else { self.advance_right(); }
            debug!("pp Begin({})/buffer ~[{},{}]",
                   b.offset, self.left, self.right);
            // Size is pending: store the negated running total; the
            // real size is fixed up when the matching End is scanned.
            self.token[self.right] = token;
            self.size[self.right] = -self.right_total;
            let right = self.right;
            self.scan_push(right);
            Ok(())
        }
        Token::End => {
            if self.scan_stack_empty {
                // Nothing is pending: print directly.
                debug!("pp End/print ~[{},{}]", self.left, self.right);
                self.print(token, 0)
            } else {
                debug!("pp End/buffer ~[{},{}]", self.left, self.right);
                self.advance_right();
                self.token[self.right] = token;
                self.size[self.right] = -1;
                let right = self.right;
                self.scan_push(right);
                Ok(())
            }
        }
        Token::Break(b) => {
            if self.scan_stack_empty {
                self.left_total = 1;
                self.right_total = 1;
                self.left = 0;
                self.right = 0;
            } else { self.advance_right(); }
            debug!("pp Break({})/buffer ~[{},{}]",
                   b.offset, self.left, self.right);
            // A new Break closes the extent of the previous pending
            // Break (check_stack), then becomes pending itself.
            self.check_stack(0);
            let right = self.right;
            self.scan_push(right);
            self.token[self.right] = token;
            self.size[self.right] = -self.right_total;
            self.right_total += b.blank_space;
            Ok(())
        }
        Token::String(s, len) => {
            // Strings have a known size immediately.
            if self.scan_stack_empty {
                debug!("pp String('{}')/print ~[{},{}]",
                       s, self.left, self.right);
                self.print(Token::String(s, len), len)
            } else {
                debug!("pp String('{}')/buffer ~[{},{}]",
                       s, self.left, self.right);
                self.advance_right();
                self.token[self.right] = Token::String(s, len);
                self.size[self.right] = len;
                self.right_total += len;
                self.check_stream()
            }
        }
    }
}
/// If the buffered window is wider than the space left on the line, the
/// leftmost pending entry can never fit: mark its size "infinite" so
/// PRINT must break it, then flush from the left until we fit again.
pub fn check_stream(&mut self) -> old_io::IoResult<()> {
    debug!("check_stream ~[{}, {}] with left_total={}, right_total={}",
           self.left, self.right, self.left_total, self.right_total);
    if self.right_total - self.left_total > self.space {
        debug!("scan window is {}, longer than space on line ({})",
               self.right_total - self.left_total, self.space);
        if!self.scan_stack_empty {
            if self.left == self.scan_stack[self.bottom] {
                debug!("setting {} to infinity and popping", self.left);
                let scanned = self.scan_pop_bottom();
                self.size[scanned] = SIZE_INFINITY;
            }
        }
        try!(self.advance_left());
        if self.left!= self.right {
            try!(self.check_stream());
        }
    }
    Ok(())
}
/// Pushes ring-buffer index `x` onto the scan stack (itself a ring).
pub fn scan_push(&mut self, x: usize) {
    debug!("scan_push {}", x);
    if self.scan_stack_empty {
        self.scan_stack_empty = false;
    } else {
        self.top += 1;
        self.top %= self.buf_len;
        assert!((self.top!= self.bottom));
    }
    self.scan_stack[self.top] = x;
}
/// Pops and returns the index on top of the scan stack.
pub fn scan_pop(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    let x = self.scan_stack[self.top];
    if self.top == self.bottom {
        self.scan_stack_empty = true;
    } else {
        // Decrement modulo buf_len without going negative.
        self.top += self.buf_len - 1; self.top %= self.buf_len;
    }
    return x;
}
/// Returns the index on top of the scan stack without popping it.
pub fn scan_top(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    return self.scan_stack[self.top];
}
/// Pops and returns the index at the bottom of the scan stack (used
/// when the left edge of the window catches up with a pending entry).
pub fn scan_pop_bottom(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    let x = self.scan_stack[self.bottom];
    if self.top == self.bottom {
        self.scan_stack_empty = true;
    } else {
        self.bottom += 1; self.bottom %= self.buf_len;
    }
    return x;
}
/// Advances the right edge of the ring buffer by one slot, wrapping.
pub fn advance_right(&mut self) {
    self.right += 1;
    self.right %= self.buf_len;
    assert!((self.right!= self.left));
}
/// PRINT driver: emits tokens from the left edge of the ring buffer for
/// as long as their sizes are known (non-negative), advancing the
/// window as it goes.
pub fn advance_left(&mut self) -> old_io::IoResult<()> {
    debug!("advance_left ~[{},{}], sizeof({})={}", self.left, self.right,
           self.left, self.size[self.left]);
    let mut left_size = self.size[self.left];
    while left_size >= 0 {
        let left = self.token[self.left].clone();
        // How much of the line this token consumes (for left_total).
        let len = match left {
            Token::Break(b) => b.blank_space,
            Token::String(_, len) => {
                assert_eq!(len, left_size);
                len
            }
            _ => 0
        };
        try!(self.print(left, left_size));
        self.left_total += len;
        if self.left == self.right {
            break;
        }
        self.left += 1;
        self.left %= self.buf_len;
        left_size = self.size[self.left];
    }
    Ok(())
}
/// Part of SCAN: resolves pending (negative) 'size' entries sitting on
/// the scan stack once enough of the stream has been seen. `k` counts
/// how many enclosing End tokens we may pop through while resolving.
pub fn check_stack(&mut self, k: isize) {
    if!self.scan_stack_empty {
        let x = self.scan_top();
        match self.token[x] {
            Token::Begin(_) => {
                // A Begin's size becomes known only once we are inside
                // a closing context (k > 0).
                if k > 0 {
                    let popped = self.scan_pop();
                    self.size[popped] = self.size[x] + self.right_total;
                    self.check_stack(k - 1);
                }
            }
            Token::End => {
                // paper says + not =, but that makes no sense.
                let popped = self.scan_pop();
                self.size[popped] = 1;
                self.check_stack(k + 1);
            }
            _ => {
                // A Break: its extent ends here; fix its size, and keep
                // resolving if we are still inside a closing context.
                let popped = self.scan_pop();
                self.size[popped] = self.size[x] + self.right_total;
                if k > 0 {
                    self.check_stack(k);
                }
            }
        }
    }
}
/// Emits a literal newline and schedules `amount` spaces of indentation
/// for the start of the next line (written lazily by print_str).
pub fn print_newline(&mut self, amount: isize) -> old_io::IoResult<()> {
    debug!("NEWLINE {}", amount);
    let ret = write!(self.out, "\n");
    // Reset, then re-add: the new line starts from column zero.
    self.pending_indentation = 0;
    self.indent(amount);
    return ret;
}
/// Buffers `amount` spaces of indentation instead of writing them now,
/// so a line that ends here never carries trailing whitespace.
pub fn indent(&mut self, amount: isize) {
    debug!("INDENT {}", amount);
    self.pending_indentation += amount;
}
/// Returns the innermost open block on the print stack, or a synthetic
/// broken-inconsistent block at offset 0 when the stack is empty (i.e.
/// we are at the top level).
pub fn get_top(&mut self) -> PrintStackElem {
    let print_stack = &mut self.print_stack;
    let n = print_stack.len();
    if n!= 0 {
        (*print_stack)[n - 1]
    } else {
        PrintStackElem {
            offset: 0,
            pbreak: PrintStackBreak::Broken(Breaks::Inconsistent)
        }
    }
}
/// Flushes any pending indentation as spaces, then writes `s` verbatim.
pub fn print_str(&mut self, s: &str) -> old_io::IoResult<()> {
    while self.pending_indentation > 0 {
        try!(write!(self.out, " "));
        self.pending_indentation -= 1;
    }
    write!(self.out, "{}", s)
}
/// PRINT side: consumes one token whose size `l` is now known, keeping
/// the print stack and the remaining line space up to date, and writing
/// output through print_str / print_newline.
pub fn print(&mut self, token: Token, l: isize) -> old_io::IoResult<()> {
    debug!("print {} {} (remaining line space={})", tok_str(&token), l,
           self.space);
    debug!("{}", buf_str(&self.token[],
                         &self.size[],
                         self.left,
                         self.right,
                         6));
    match token {
        Token::Begin(b) => {
            if l > self.space {
                // Block cannot fit on the rest of this line: push it
                // broken, indenting its breaks relative to the current
                // column.
                let col = self.margin - self.space + b.offset;
                debug!("print Begin -> push broken block at col {}", col);
                self.print_stack.push(PrintStackElem {
                    offset: col,
                    pbreak: PrintStackBreak::Broken(b.breaks)
                });
            } else {
                debug!("print Begin -> push fitting block");
                self.print_stack.push(PrintStackElem {
                    offset: 0,
                    pbreak: PrintStackBreak::Fits
                });
            }
            Ok(())
        }
        Token::End => {
            debug!("print End -> pop End");
            let print_stack = &mut self.print_stack;
            assert!((print_stack.len()!= 0));
            print_stack.pop().unwrap();
            Ok(())
        }
        Token::Break(b) => {
            let top = self.get_top();
            match top.pbreak {
                PrintStackBreak::Fits => {
                    // Enclosing block fits: render the break as spaces.
                    debug!("print Break({}) in fitting block", b.blank_space);
                    self.space -= b.blank_space;
                    self.indent(b.blank_space);
                    Ok(())
                }
                PrintStackBreak::Broken(Breaks::Consistent) => {
                    // Consistent block: every break becomes a newline.
                    debug!("print Break({}+{}) in consistent block",
                           top.offset, b.offset);
                    let ret = self.print_newline(top.offset + b.offset);
                    self.space = self.margin - (top.offset + b.offset);
                    ret
                }
                PrintStackBreak::Broken(Breaks::Inconsistent) => {
                    // Inconsistent block: break only when the next
                    // chunk would not fit on this line.
                    if l > self.space {
                        debug!("print Break({}+{}) w/ newline in inconsistent",
                               top.offset, b.offset);
                        let ret = self.print_newline(top.offset + b.offset);
                        self.space = self.margin - (top.offset + b.offset);
                        ret
                    } else {
                        debug!("print Break({}) w/o newline in inconsistent",
                               b.blank_space);
                        self.indent(b.blank_space);
                        self.space -= b.blank_space;
                        Ok(())
                    }
                }
            }
        }
        Token::String(s, len) => {
            debug!("print String({})", s);
            assert_eq!(l, len);
            // assert!(l <= space);
            self.space -= len;
            self.print_str(&s[])
        }
        Token::Eof => {
            // Eof is fully handled in pretty_print; it should never
            // reach PRINT.
            panic!();
        }
    }
}
}
// Convenience functions to talk to the printer.
//
// "raw box": open a block with the given extra indent and break style.
pub fn rbox(p: &mut Printer, indent: usize, b: Breaks) -> old_io::IoResult<()> {
    p.pretty_print(Token::Begin(BeginToken {
        offset: indent as isize,
        breaks: b
    }))
}
/// Opens an inconsistent-breaking block.
pub fn ibox(p: &mut Printer, indent: usize) -> old_io::IoResult<()> {
    rbox(p, indent, Breaks::Inconsistent)
}
/// Opens a consistent-breaking block.
pub fn cbox(p: &mut Printer, indent: usize) -> old_io::IoResult<()> {
    rbox(p, indent, Breaks::Consistent)
}
/// Emits a break of `n` spaces, indenting a further `off` if taken.
pub fn break_offset(p: &mut Printer, n: usize, off: isize) -> old_io::IoResult<()> {
    p.pretty_print(Token::Break(BreakToken {
        offset: off,
        blank_space: n as isize
    }))
}
/// Closes the current block.
pub fn end(p: &mut Printer) -> old_io::IoResult<()> {
    p.pretty_print(Token::End)
}
/// Signals end of input, flushing all buffered tokens.
pub fn eof(p: &mut Printer) -> old_io::IoResult<()> {
    p.pretty_print(Token::Eof)
}
/// Emits a word with its natural length.
pub fn word(p: &mut Printer, wrd: &str) -> old_io::IoResult<()> {
    p.pretty_print(Token::String(/* bad */ wrd.to_string(), wrd.len() as isize))
}
/// Emits a word with "infinite" length, forcing it onto its own line.
pub fn huge_word(p: &mut Printer, wrd: &str) -> old_io::IoResult<()> {
    p.pretty_print(Token::String(/* bad */ wrd.to_string(), SIZE_INFINITY))
}
/// Emits a word that counts as zero-width for layout purposes.
pub fn zero_word(p: &mut Printer, wrd: &str) -> old_io::IoResult<()> {
    p.pretty_print(Token::String(/* bad */ wrd.to_string(), 0))
}
/// Emits a break rendered as `n` spaces when not taken.
pub fn spaces(p: &mut Printer, n: usize) -> old_io::IoResult<()> {
    break_offset(p, n, 0)
}
/// A zero-width break: an optional line-break point.
pub fn zerobreak(p: &mut Printer) -> old_io::IoResult<()> {
    spaces(p, 0)
}
/// A one-space break.
pub fn space(p: &mut Printer) -> old_io::IoResult<()> {
    spaces(p, 1)
}
/// A break that can never fit on the line: always becomes a newline.
pub fn hardbreak(p: &mut Printer) -> old_io::IoResult<()> {
    spaces(p, SIZE_INFINITY as usize)
}
|
random_line_split
|
|
pp.rs
|
algorithm decides to break
//! there anyways (because the functions themselves are long) you wind up with
//! extra blank lines. If you don't put hardbreaks you can wind up with the
//! "thing which should be on its own line" not getting its own line in the
//! rare case of "really small functions" or such. This re-occurs with comments
//! and explicit blank lines. So in those cases we use a string with a payload
//! we want isolated to a line and an explicit length that's huge, surrounded
//! by two zero-length breaks. The algorithm will try its best to fit it on a
//! line (which it can't) and so naturally place the content on its own line to
//! avoid combining it with other lines and making matters even worse.
use std::old_io;
use std::string;
use std::iter::repeat;
/// Break style of a block: a Consistent block, once broken at all,
/// breaks at every Break; an Inconsistent block flows content and
/// breaks only where needed.
#[derive(Clone, Copy, PartialEq)]
pub enum Breaks {
    Consistent,
    Inconsistent,
}
/// A potential line-break point.
#[derive(Clone, Copy)]
pub struct BreakToken {
    offset: isize,      // extra indent applied if this break is taken
    blank_space: isize  // spaces emitted if the break is not taken
}
/// Opens a block.
#[derive(Clone, Copy)]
pub struct BeginToken {
    offset: isize,  // indent for breaks inside the block
    breaks: Breaks  // break style of the block
}
/// One element of the token stream fed to the printer.
#[derive(Clone)]
pub enum Token {
    String(String, isize),  // literal text with its display length
    Break(BreakToken),
    Begin(BeginToken),
    End,
    Eof,
}
impl Token {
    /// True iff this token is the end-of-input marker.
    pub fn is_eof(&self) -> bool {
        if let Token::Eof = *self { true } else { false }
    }
    /// True iff this token is a "hard" break: a Break with zero offset
    /// and an infinite amount of blank space, which can never fit on a
    /// line and therefore always forces a newline.
    pub fn is_hardbreak_tok(&self) -> bool {
        match *self {
            Token::Break(BreakToken { offset, blank_space }) =>
                offset == 0 && blank_space == SIZE_INFINITY,
            _ => false
        }
    }
}
/// Short human-readable tag for a token, used in debug output.
pub fn tok_str(token: &Token) -> String {
    let tag = match *token {
        Token::String(ref s, len) => return format!("STR({},{})", s, len),
        Token::Break(_) => "BREAK",
        Token::Begin(_) => "BEGIN",
        Token::End => "END",
        Token::Eof => "EOF",
    };
    tag.to_string()
}
/// Renders up to `lim` entries of the parallel token/size ring buffers,
/// walking from `left` towards `right`, as "[size=TOKEN, ...]" for
/// debug logging.
pub fn buf_str(toks: &[Token],
               szs: &[isize],
               left: usize,
               right: usize,
               lim: usize)
               -> String {
    let n = toks.len();
    assert_eq!(n, szs.len());
    let mut i = left;
    let mut l = lim;
    let mut s = string::String::from_str("[");
    while i!= right && l!= 0 {
        l -= 1;
        if i!= left {
            s.push_str(", ");
        }
        s.push_str(&format!("{}={}",
                            szs[i],
                            tok_str(&toks[i]))[]);
        // Wrap around the ring buffer.
        i += 1;
        i %= n;
    }
    s.push(']');
    s
}
/// Whether a block reached by PRINT fit on the line or had to break.
#[derive(Copy)]
pub enum PrintStackBreak {
    Fits,
    Broken(Breaks),
}
/// One entry of PRINT's stack of open blocks.
#[derive(Copy)]
pub struct PrintStackElem {
    offset: isize,           // indent column for breaks in this block
    pbreak: PrintStackBreak  // whether/how the block broke
}
/// Sentinel "size" for content that can never fit on one line.
static SIZE_INFINITY: isize = 0xffff;
/// Creates a Printer writing to `out`, constrained to `linewidth`
/// columns.
pub fn mk_printer(out: Box<old_io::Writer+'static>, linewidth: usize) -> Printer {
    // Yes 3, it makes the ring buffers big enough to never
    // fall behind.
    let n: usize = 3 * linewidth;
    debug!("mk_printer {}", linewidth);
    let token: Vec<Token> = repeat(Token::Eof).take(n).collect();
    let size: Vec<isize> = repeat(0).take(n).collect();
    let scan_stack: Vec<usize> = repeat(0us).take(n).collect();
    Printer {
        out: out,
        buf_len: n,
        margin: linewidth as isize,
        space: linewidth as isize,
        left: 0,
        right: 0,
        token: token,
        size: size,
        left_total: 0,
        right_total: 0,
        scan_stack: scan_stack,
        scan_stack_empty: true,
        top: 0,
        bottom: 0,
        print_stack: Vec::new(),
        pending_indentation: 0
    }
}
/// In case you do not have the paper, here is an explanation of what's going
/// on.
///
/// There is a stream of input tokens flowing through this printer.
///
/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
/// Yes, linewidth is chars and tokens are multi-char, but in the worst
/// case every token worth buffering is 1 char long, so it's ok.
///
/// Tokens are String, Break, and Begin/End to delimit blocks.
///
/// Begin tokens can carry an offset, saying "how far to indent when you break
/// inside here", as well as a flag indicating "consistent" or "inconsistent"
/// breaking. Consistent breaking means that after the first break, no attempt
/// will be made to flow subsequent breaks together onto lines. Inconsistent
/// is the opposite. Inconsistent breaking example would be, say:
///
/// foo(hello, there, good, friends)
///
/// breaking inconsistently to become
///
/// foo(hello, there
/// good, friends);
///
/// whereas a consistent breaking would yield:
///
/// foo(hello,
/// there
/// good,
/// friends);
///
/// That is, in the consistent-break blocks we value vertical alignment
/// more than the ability to cram stuff onto a line. But in all cases if it
/// can make a block a one-liner, it'll do so.
///
/// Carrying on with high-level logic:
///
/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
/// 'right' indices denote the active portion of the ring buffer as well as
/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
/// and point-in-infinite-stream senses freely.
///
/// There is a parallel ring buffer, 'size', that holds the calculated size of
/// each token. Why calculated? Because for Begin/End pairs, the "size"
/// includes everything between the pair. That is, the "size" of Begin is
/// actually the sum of the sizes of everything between Begin and the paired
/// End that follows. Since that is arbitrarily far in the future, 'size' is
/// being rewritten regularly while the printer runs; in fact most of the
/// machinery is here to work out 'size' entries on the fly (and give up when
/// they're so obviously over-long that "infinity" is a good enough
/// approximation for purposes of line breaking).
///
/// The "input side" of the printer is managed as an abstract process called
/// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
/// manage calculating 'size'. SCAN is, in other words, the process of
/// calculating 'size' entries.
///
/// The "output side" of the printer is managed by an abstract process called
/// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
/// do with each token/size pair it consumes as it goes. It's trying to consume
/// the entire buffered window, but can't output anything until the size is >=
/// 0 (sizes are set to negative while they're pending calculation).
///
/// So SCAN takes input and buffers tokens and pending calculations, while
/// PRINT gobbles up completed calculations and tokens from the buffer. The
/// theory is that the two can never get more than 3N tokens apart, because
/// once there's "obviously" too much data to fit on a line, in a size
/// calculation, SCAN will write "infinity" to the size and let PRINT consume
/// it.
///
/// In this implementation (following the paper, again) the SCAN process is
/// the method called 'pretty_print', and the 'PRINT' process is the method
/// called 'print'.
/// The pretty-printer state; implements the SCAN/PRINT model described
/// in the explanation above.
pub struct Printer {
    pub out: Box<old_io::Writer+'static>,
    /// Capacity of each ring buffer (3 * linewidth; see mk_printer)
    buf_len: usize,
    /// Width of lines we're constrained to
    margin: isize,
    /// Number of spaces left on line
    space: isize,
    /// Index of left side of input stream
    left: usize,
    /// Index of right side of input stream
    right: usize,
    /// Ring-buffer stream goes through
    token: Vec<Token>,
    /// Ring-buffer of calculated sizes (negative while still pending)
    size: Vec<isize>,
    /// Running size of stream "...left"
    left_total: isize,
    /// Running size of stream "...right"
    right_total: isize,
    /// Pseudo-stack, really a ring too. Holds the
    /// primary-ring-buffers index of the Begin that started the
    /// current block, possibly with the most recent Break after that
    /// Begin (if there is any) on top of it. Stuff is flushed off the
    /// bottom as it becomes irrelevant due to the primary ring-buffer
    /// advancing.
    scan_stack: Vec<usize>,
    /// Top==bottom disambiguator
    scan_stack_empty: bool,
    /// Index of top of scan_stack
    top: usize,
    /// Index of bottom of scan_stack
    bottom: usize,
    /// Stack of blocks-in-progress being flushed by print
    print_stack: Vec<PrintStackElem>,
    /// Buffered indentation to avoid writing trailing whitespace
    pending_indentation: isize,
}
impl Printer {
pub fn last_token(&mut self) -> Token
|
// be very careful with this! Overwrites the most recently buffered
// token in place, with no size or scan-stack bookkeeping.
pub fn replace_last_token(&mut self, t: Token) {
    self.token[self.right] = t;
}
/// SCAN entry point: feeds one `token` into the printer. Tokens are
/// buffered into the ring with pending (negative) sizes until enough
/// of the stream is known, then handed to PRINT via advance_left /
/// check_stream.
pub fn pretty_print(&mut self, token: Token) -> old_io::IoResult<()> {
    debug!("pp ~[{},{}]", self.left, self.right);
    match token {
        Token::Eof => {
            // End of input: resolve whatever sizes remain and flush.
            if!self.scan_stack_empty {
                self.check_stack(0);
                try!(self.advance_left());
            }
            self.indent(0);
            Ok(())
        }
        Token::Begin(b) => {
            if self.scan_stack_empty {
                // First buffered token: reset the window.
                self.left_total = 1;
                self.right_total = 1;
                self.left = 0;
                self.right = 0;
            } else { self.advance_right(); }
            debug!("pp Begin({})/buffer ~[{},{}]",
                   b.offset, self.left, self.right);
            // Size is pending: store the negated running total; the
            // real size is fixed up when the matching End is scanned.
            self.token[self.right] = token;
            self.size[self.right] = -self.right_total;
            let right = self.right;
            self.scan_push(right);
            Ok(())
        }
        Token::End => {
            if self.scan_stack_empty {
                // Nothing is pending: print directly.
                debug!("pp End/print ~[{},{}]", self.left, self.right);
                self.print(token, 0)
            } else {
                debug!("pp End/buffer ~[{},{}]", self.left, self.right);
                self.advance_right();
                self.token[self.right] = token;
                self.size[self.right] = -1;
                let right = self.right;
                self.scan_push(right);
                Ok(())
            }
        }
        Token::Break(b) => {
            if self.scan_stack_empty {
                self.left_total = 1;
                self.right_total = 1;
                self.left = 0;
                self.right = 0;
            } else { self.advance_right(); }
            debug!("pp Break({})/buffer ~[{},{}]",
                   b.offset, self.left, self.right);
            // A new Break closes the extent of the previous pending
            // Break (check_stack), then becomes pending itself.
            self.check_stack(0);
            let right = self.right;
            self.scan_push(right);
            self.token[self.right] = token;
            self.size[self.right] = -self.right_total;
            self.right_total += b.blank_space;
            Ok(())
        }
        Token::String(s, len) => {
            // Strings have a known size immediately.
            if self.scan_stack_empty {
                debug!("pp String('{}')/print ~[{},{}]",
                       s, self.left, self.right);
                self.print(Token::String(s, len), len)
            } else {
                debug!("pp String('{}')/buffer ~[{},{}]",
                       s, self.left, self.right);
                self.advance_right();
                self.token[self.right] = Token::String(s, len);
                self.size[self.right] = len;
                self.right_total += len;
                self.check_stream()
            }
        }
    }
}
/// If the buffered window is wider than the space left on the line, the
/// leftmost pending entry can never fit: mark its size "infinite" so
/// PRINT must break it, then flush from the left until we fit again.
pub fn check_stream(&mut self) -> old_io::IoResult<()> {
    debug!("check_stream ~[{}, {}] with left_total={}, right_total={}",
           self.left, self.right, self.left_total, self.right_total);
    if self.right_total - self.left_total > self.space {
        debug!("scan window is {}, longer than space on line ({})",
               self.right_total - self.left_total, self.space);
        if!self.scan_stack_empty {
            if self.left == self.scan_stack[self.bottom] {
                debug!("setting {} to infinity and popping", self.left);
                let scanned = self.scan_pop_bottom();
                self.size[scanned] = SIZE_INFINITY;
            }
        }
        try!(self.advance_left());
        if self.left!= self.right {
            try!(self.check_stream());
        }
    }
    Ok(())
}
/// Pushes ring-buffer index `x` onto the scan stack (itself a ring).
pub fn scan_push(&mut self, x: usize) {
    debug!("scan_push {}", x);
    if self.scan_stack_empty {
        self.scan_stack_empty = false;
    } else {
        self.top += 1;
        self.top %= self.buf_len;
        assert!((self.top!= self.bottom));
    }
    self.scan_stack[self.top] = x;
}
/// Pops and returns the index on top of the scan stack.
pub fn scan_pop(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    let x = self.scan_stack[self.top];
    if self.top == self.bottom {
        self.scan_stack_empty = true;
    } else {
        // Decrement modulo buf_len without going negative.
        self.top += self.buf_len - 1; self.top %= self.buf_len;
    }
    return x;
}
/// Returns the index on top of the scan stack without popping it.
pub fn scan_top(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    return self.scan_stack[self.top];
}
/// Pops and returns the index at the bottom of the scan stack (used
/// when the left edge of the window catches up with a pending entry).
pub fn scan_pop_bottom(&mut self) -> usize {
    assert!((!self.scan_stack_empty));
    let x = self.scan_stack[self.bottom];
    if self.top == self.bottom {
        self.scan_stack_empty = true;
    } else {
        self.bottom += 1; self.bottom %= self.buf_len;
    }
    return x;
}
/// Advances the right edge of the ring buffer by one slot, wrapping.
pub fn advance_right(&mut self) {
    self.right += 1;
    self.right %= self.buf_len;
    assert!((self.right!= self.left));
}
/// PRINT driver: emits tokens from the left edge of the ring buffer for
/// as long as their sizes are known (non-negative), advancing the
/// window as it goes.
pub fn advance_left(&mut self) -> old_io::IoResult<()> {
    debug!("advance_left ~[{},{}], sizeof({})={}", self.left, self.right,
           self.left, self.size[self.left]);
    let mut left_size = self.size[self.left];
    while left_size >= 0 {
        let left = self.token[self.left].clone();
        // How much of the line this token consumes (for left_total).
        let len = match left {
            Token::Break(b) => b.blank_space,
            Token::String(_, len) => {
                assert_eq!(len, left_size);
                len
            }
            _ => 0
        };
        try!(self.print(left, left_size));
        self.left_total += len;
        if self.left == self.right {
            break;
        }
        self.left += 1;
        self.left %= self.buf_len;
        left_size = self.size[self.left];
    }
    Ok(())
}
/// Part of SCAN: resolves pending (negative) 'size' entries sitting on
/// the scan stack once enough of the stream has been seen. `k` counts
/// how many enclosing End tokens we may pop through while resolving.
pub fn check_stack(&mut self, k: isize) {
    if!self.scan_stack_empty {
        let x = self.scan_top();
        match self.token[x] {
            Token::Begin(_) => {
                // A Begin's size becomes known only once we are inside
                // a closing context (k > 0).
                if k > 0 {
                    let popped = self.scan_pop();
                    self.size[popped] = self.size[x] + self.right_total;
                    self.check_stack(k - 1);
                }
            }
            Token::End => {
                // paper says + not =, but that makes no sense.
                let popped = self.scan_pop();
                self.size[popped] = 1;
                self.check_stack(k + 1);
            }
            _ => {
                // A Break: its extent ends here; fix its size, and keep
                // resolving if we are still inside a closing context.
                let popped = self.scan_pop();
                self.size[popped] = self.size[x] + self.right_total;
                if k > 0 {
                    self.check_stack(k);
                }
            }
        }
    }
}
/// Emits a literal newline and schedules `amount` spaces of indentation
/// for the start of the next line (written lazily by print_str).
pub fn print_newline(&mut self, amount: isize) -> old_io::IoResult<()> {
    debug!("NEWLINE {}", amount);
    let ret = write!(self.out, "\n");
    // Reset, then re-add: the new line starts from column zero.
    self.pending_indentation = 0;
    self.indent(amount);
    return ret;
}
/// Buffers `amount` spaces of indentation instead of writing them now,
/// so a line that ends here never carries trailing whitespace.
pub fn indent(&mut self, amount: isize) {
    debug!("INDENT {}", amount);
    self.pending_indentation += amount;
}
/// Returns the innermost open block on the print stack, or a synthetic
/// broken-inconsistent block at offset 0 when the stack is empty (i.e.
/// we are at the top level).
pub fn get_top(&mut self) -> PrintStackElem {
    let print_stack = &mut self.print_stack;
    let n = print_stack.len();
    if n!= 0 {
        (*print_stack)[n - 1]
    } else {
        PrintStackElem {
            offset: 0,
            pbreak: PrintStackBreak::Broken(Breaks::Inconsistent)
        }
    }
}
/// Flushes any pending indentation as spaces, then writes `s` verbatim.
pub fn print_str(&mut self, s: &str) -> old_io::IoResult<()> {
    while self.pending_indentation > 0 {
        try!(write!(self.out, " "));
        self.pending_indentation -= 1;
    }
    write!(self.out, "{}", s)
}
pub fn print(&mut self, token: Token, l: isize) -> old_io::IoResult<()> {
debug!("print {} {} (remaining line space={})", tok_str(&token), l,
self.space);
debug!("{}", buf_str(&self.token[],
&self.size[],
self.left,
self.right,
6));
match token {
Token::Begin(b) => {
if l > self.space {
let col = self.margin - self.space + b.offset;
debug!("print Begin -> push broken block at col {}", col);
self.print_stack.push(PrintStackElem {
offset: col,
pbreak: PrintStackBreak::Broken(b.breaks)
});
} else {
debug!("print Begin -> push fitting block");
self.print_stack.push(PrintStackElem {
offset: 0,
pbreak: PrintStackBreak::Fits
});
}
Ok(())
}
Token::End => {
debug!("print End -> pop End");
let print_stack = &mut self.print_stack;
assert!((print_stack.len()!= 0));
print_stack.pop().unwrap();
Ok(())
}
Token::Break(b) => {
let top = self.get_top();
match top.pbreak {
PrintStackBreak::Fits => {
debug!("print Break({}) in fitting block", b.blank_space);
|
{
self.token[self.right].clone()
}
|
identifier_body
|
pp.rs
|
algorithm decides to break
//! there anyways (because the functions themselves are long) you wind up with
//! extra blank lines. If you don't put hardbreaks you can wind up with the
//! "thing which should be on its own line" not getting its own line in the
//! rare case of "really small functions" or such. This re-occurs with comments
//! and explicit blank lines. So in those cases we use a string with a payload
//! we want isolated to a line and an explicit length that's huge, surrounded
//! by two zero-length breaks. The algorithm will try its best to fit it on a
//! line (which it can't) and so naturally place the content on its own line to
//! avoid combining it with other lines and making matters even worse.
use std::old_io;
use std::string;
use std::iter::repeat;
/// Break style of a block: a Consistent block, once broken at all,
/// breaks at every Break; an Inconsistent block flows content and
/// breaks only where needed.
#[derive(Clone, Copy, PartialEq)]
pub enum Breaks {
    Consistent,
    Inconsistent,
}
/// A potential line-break point.
#[derive(Clone, Copy)]
pub struct BreakToken {
    offset: isize,      // extra indent applied if this break is taken
    blank_space: isize  // spaces emitted if the break is not taken
}
/// Opens a block.
#[derive(Clone, Copy)]
pub struct BeginToken {
    offset: isize,  // indent for breaks inside the block
    breaks: Breaks  // break style of the block
}
/// One element of the token stream fed to the printer.
#[derive(Clone)]
pub enum Token {
    String(String, isize),  // literal text with its display length
    Break(BreakToken),
    Begin(BeginToken),
    End,
    Eof,
}
impl Token {
    /// True iff this token is the end-of-input marker.
    pub fn is_eof(&self) -> bool {
        if let Token::Eof = *self { true } else { false }
    }
    /// True iff this token is a "hard" break: a Break with zero offset
    /// and an infinite amount of blank space, which can never fit on a
    /// line and therefore always forces a newline.
    pub fn is_hardbreak_tok(&self) -> bool {
        match *self {
            Token::Break(BreakToken { offset, blank_space }) =>
                offset == 0 && blank_space == SIZE_INFINITY,
            _ => false
        }
    }
}
/// Short human-readable tag for a token, used in debug output.
pub fn tok_str(token: &Token) -> String {
    let tag = match *token {
        Token::String(ref s, len) => return format!("STR({},{})", s, len),
        Token::Break(_) => "BREAK",
        Token::Begin(_) => "BEGIN",
        Token::End => "END",
        Token::Eof => "EOF",
    };
    tag.to_string()
}
/// Renders up to `lim` entries of the parallel token/size ring buffers,
/// walking from `left` towards `right`, as "[size=TOKEN, ...]" for
/// debug logging.
pub fn buf_str(toks: &[Token],
               szs: &[isize],
               left: usize,
               right: usize,
               lim: usize)
               -> String {
    let n = toks.len();
    assert_eq!(n, szs.len());
    let mut i = left;
    let mut l = lim;
    let mut s = string::String::from_str("[");
    while i!= right && l!= 0 {
        l -= 1;
        if i!= left {
            s.push_str(", ");
        }
        s.push_str(&format!("{}={}",
                            szs[i],
                            tok_str(&toks[i]))[]);
        // Wrap around the ring buffer.
        i += 1;
        i %= n;
    }
    s.push(']');
    s
}
/// Whether a block reached by PRINT fit on the line or had to break.
#[derive(Copy)]
pub enum PrintStackBreak {
    Fits,
    Broken(Breaks),
}
/// One entry of PRINT's stack of open blocks.
#[derive(Copy)]
pub struct PrintStackElem {
    offset: isize,           // indent column for breaks in this block
    pbreak: PrintStackBreak  // whether/how the block broke
}
/// Sentinel "size" for content that can never fit on one line.
static SIZE_INFINITY: isize = 0xffff;
/// Creates a Printer writing to `out`, constrained to `linewidth`
/// columns.
pub fn mk_printer(out: Box<old_io::Writer+'static>, linewidth: usize) -> Printer {
    // Yes 3, it makes the ring buffers big enough to never
    // fall behind.
    let n: usize = 3 * linewidth;
    debug!("mk_printer {}", linewidth);
    let token: Vec<Token> = repeat(Token::Eof).take(n).collect();
    let size: Vec<isize> = repeat(0).take(n).collect();
    let scan_stack: Vec<usize> = repeat(0us).take(n).collect();
    Printer {
        out: out,
        buf_len: n,
        margin: linewidth as isize,
        space: linewidth as isize,
        left: 0,
        right: 0,
        token: token,
        size: size,
        left_total: 0,
        right_total: 0,
        scan_stack: scan_stack,
        scan_stack_empty: true,
        top: 0,
        bottom: 0,
        print_stack: Vec::new(),
        pending_indentation: 0
    }
}
/// In case you do not have the paper, here is an explanation of what's going
/// on.
///
/// There is a stream of input tokens flowing through this printer.
///
/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
/// Yes, linewidth is chars and tokens are multi-char, but in the worst
/// case every token worth buffering is 1 char long, so it's ok.
///
/// Tokens are String, Break, and Begin/End to delimit blocks.
///
/// Begin tokens can carry an offset, saying "how far to indent when you break
/// inside here", as well as a flag indicating "consistent" or "inconsistent"
/// breaking. Consistent breaking means that after the first break, no attempt
/// will be made to flow subsequent breaks together onto lines. Inconsistent
/// is the opposite. Inconsistent breaking example would be, say:
///
/// foo(hello, there, good, friends)
///
/// breaking inconsistently to become
///
/// foo(hello, there
/// good, friends);
///
/// whereas a consistent breaking would yield:
///
/// foo(hello,
/// there
/// good,
/// friends);
///
/// That is, in the consistent-break blocks we value vertical alignment
/// more than the ability to cram stuff onto a line. But in all cases if it
/// can make a block a one-liner, it'll do so.
///
/// Carrying on with high-level logic:
///
/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
/// 'right' indices denote the active portion of the ring buffer as well as
/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
/// and point-in-infinite-stream senses freely.
///
/// There is a parallel ring buffer,'size', that holds the calculated size of
/// each token. Why calculated? Because for Begin/End pairs, the "size"
/// includes everything between the pair. That is, the "size" of Begin is
/// actually the sum of the sizes of everything between Begin and the paired
/// End that follows. Since that is arbitrarily far in the future,'size' is
/// being rewritten regularly while the printer runs; in fact most of the
/// machinery is here to work out'size' entries on the fly (and give up when
/// they're so obviously over-long that "infinity" is a good enough
/// approximation for purposes of line breaking).
///
/// The "input side" of the printer is managed as an abstract process called
/// SCAN, which uses'scan_stack','scan_stack_empty', 'top' and 'bottom', to
/// manage calculating'size'. SCAN is, in other words, the process of
/// calculating'size' entries.
///
/// The "output side" of the printer is managed by an abstract process called
/// PRINT, which uses 'print_stack','margin' and'space' to figure out what to
/// do with each token/size pair it consumes as it goes. It's trying to consume
/// the entire buffered window, but can't output anything until the size is >=
/// 0 (sizes are set to negative while they're pending calculation).
///
/// So SCAN takes input and buffers tokens and pending calculations, while
/// PRINT gobbles up completed calculations and tokens from the buffer. The
/// theory is that the two can never get more than 3N tokens apart, because
/// once there's "obviously" too much data to fit on a line, in a size
/// calculation, SCAN will write "infinity" to the size and let PRINT consume
/// it.
///
/// In this implementation (following the paper, again) the SCAN process is
/// the method called 'pretty_print', and the 'PRINT' process is the method
/// called 'print'.
pub struct Printer {
pub out: Box<old_io::Writer+'static>,
buf_len: usize,
/// Width of lines we're constrained to
margin: isize,
/// Number of spaces left on line
space: isize,
/// Index of left side of input stream
left: usize,
/// Index of right side of input stream
right: usize,
/// Ring-buffer stream goes through
token: Vec<Token>,
/// Ring-buffer of calculated sizes
size: Vec<isize>,
/// Running size of stream "...left"
left_total: isize,
/// Running size of stream "...right"
right_total: isize,
/// Pseudo-stack, really a ring too. Holds the
/// primary-ring-buffers index of the Begin that started the
/// current block, possibly with the most recent Break after that
/// Begin (if there is any) on top of it. Stuff is flushed off the
/// bottom as it becomes irrelevant due to the primary ring-buffer
/// advancing.
scan_stack: Vec<usize>,
/// Top==bottom disambiguator
scan_stack_empty: bool,
/// Index of top of scan_stack
top: usize,
/// Index of bottom of scan_stack
bottom: usize,
/// Stack of blocks-in-progress being flushed by print
print_stack: Vec<PrintStackElem>,
/// Buffered indentation to avoid writing trailing whitespace
pending_indentation: isize,
}
impl Printer {
pub fn last_token(&mut self) -> Token {
self.token[self.right].clone()
}
// be very careful with this!
pub fn replace_last_token(&mut self, t: Token) {
self.token[self.right] = t;
}
pub fn pretty_print(&mut self, token: Token) -> old_io::IoResult<()> {
debug!("pp ~[{},{}]", self.left, self.right);
match token {
Token::Eof => {
if!self.scan_stack_empty {
self.check_stack(0);
try!(self.advance_left());
}
self.indent(0);
Ok(())
}
Token::Begin(b) => {
if self.scan_stack_empty {
self.left_total = 1;
self.right_total = 1;
self.left = 0;
self.right = 0;
} else { self.advance_right(); }
debug!("pp Begin({})/buffer ~[{},{}]",
b.offset, self.left, self.right);
self.token[self.right] = token;
self.size[self.right] = -self.right_total;
let right = self.right;
self.scan_push(right);
Ok(())
}
Token::End => {
if self.scan_stack_empty {
debug!("pp End/print ~[{},{}]", self.left, self.right);
self.print(token, 0)
} else {
debug!("pp End/buffer ~[{},{}]", self.left, self.right);
self.advance_right();
self.token[self.right] = token;
self.size[self.right] = -1;
let right = self.right;
self.scan_push(right);
Ok(())
}
}
Token::Break(b) => {
if self.scan_stack_empty {
self.left_total = 1;
self.right_total = 1;
self.left = 0;
self.right = 0;
} else { self.advance_right(); }
debug!("pp Break({})/buffer ~[{},{}]",
b.offset, self.left, self.right);
self.check_stack(0);
let right = self.right;
self.scan_push(right);
self.token[self.right] = token;
self.size[self.right] = -self.right_total;
self.right_total += b.blank_space;
Ok(())
}
Token::String(s, len) => {
if self.scan_stack_empty {
debug!("pp String('{}')/print ~[{},{}]",
s, self.left, self.right);
self.print(Token::String(s, len), len)
} else
|
}
}
}
pub fn check_stream(&mut self) -> old_io::IoResult<()> {
debug!("check_stream ~[{}, {}] with left_total={}, right_total={}",
self.left, self.right, self.left_total, self.right_total);
if self.right_total - self.left_total > self.space {
debug!("scan window is {}, longer than space on line ({})",
self.right_total - self.left_total, self.space);
if!self.scan_stack_empty {
if self.left == self.scan_stack[self.bottom] {
debug!("setting {} to infinity and popping", self.left);
let scanned = self.scan_pop_bottom();
self.size[scanned] = SIZE_INFINITY;
}
}
try!(self.advance_left());
if self.left!= self.right {
try!(self.check_stream());
}
}
Ok(())
}
pub fn scan_push(&mut self, x: usize) {
debug!("scan_push {}", x);
if self.scan_stack_empty {
self.scan_stack_empty = false;
} else {
self.top += 1;
self.top %= self.buf_len;
assert!((self.top!= self.bottom));
}
self.scan_stack[self.top] = x;
}
pub fn scan_pop(&mut self) -> usize {
assert!((!self.scan_stack_empty));
let x = self.scan_stack[self.top];
if self.top == self.bottom {
self.scan_stack_empty = true;
} else {
self.top += self.buf_len - 1; self.top %= self.buf_len;
}
return x;
}
pub fn scan_top(&mut self) -> usize {
assert!((!self.scan_stack_empty));
return self.scan_stack[self.top];
}
pub fn scan_pop_bottom(&mut self) -> usize {
assert!((!self.scan_stack_empty));
let x = self.scan_stack[self.bottom];
if self.top == self.bottom {
self.scan_stack_empty = true;
} else {
self.bottom += 1; self.bottom %= self.buf_len;
}
return x;
}
pub fn advance_right(&mut self) {
self.right += 1;
self.right %= self.buf_len;
assert!((self.right!= self.left));
}
pub fn advance_left(&mut self) -> old_io::IoResult<()> {
debug!("advance_left ~[{},{}], sizeof({})={}", self.left, self.right,
self.left, self.size[self.left]);
let mut left_size = self.size[self.left];
while left_size >= 0 {
let left = self.token[self.left].clone();
let len = match left {
Token::Break(b) => b.blank_space,
Token::String(_, len) => {
assert_eq!(len, left_size);
len
}
_ => 0
};
try!(self.print(left, left_size));
self.left_total += len;
if self.left == self.right {
break;
}
self.left += 1;
self.left %= self.buf_len;
left_size = self.size[self.left];
}
Ok(())
}
pub fn check_stack(&mut self, k: isize) {
if!self.scan_stack_empty {
let x = self.scan_top();
match self.token[x] {
Token::Begin(_) => {
if k > 0 {
let popped = self.scan_pop();
self.size[popped] = self.size[x] + self.right_total;
self.check_stack(k - 1);
}
}
Token::End => {
// paper says + not =, but that makes no sense.
let popped = self.scan_pop();
self.size[popped] = 1;
self.check_stack(k + 1);
}
_ => {
let popped = self.scan_pop();
self.size[popped] = self.size[x] + self.right_total;
if k > 0 {
self.check_stack(k);
}
}
}
}
}
pub fn print_newline(&mut self, amount: isize) -> old_io::IoResult<()> {
debug!("NEWLINE {}", amount);
let ret = write!(self.out, "\n");
self.pending_indentation = 0;
self.indent(amount);
return ret;
}
pub fn indent(&mut self, amount: isize) {
debug!("INDENT {}", amount);
self.pending_indentation += amount;
}
pub fn get_top(&mut self) -> PrintStackElem {
let print_stack = &mut self.print_stack;
let n = print_stack.len();
if n!= 0 {
(*print_stack)[n - 1]
} else {
PrintStackElem {
offset: 0,
pbreak: PrintStackBreak::Broken(Breaks::Inconsistent)
}
}
}
pub fn print_str(&mut self, s: &str) -> old_io::IoResult<()> {
while self.pending_indentation > 0 {
try!(write!(self.out, " "));
self.pending_indentation -= 1;
}
write!(self.out, "{}", s)
}
pub fn print(&mut self, token: Token, l: isize) -> old_io::IoResult<()> {
debug!("print {} {} (remaining line space={})", tok_str(&token), l,
self.space);
debug!("{}", buf_str(&self.token[],
&self.size[],
self.left,
self.right,
6));
match token {
Token::Begin(b) => {
if l > self.space {
let col = self.margin - self.space + b.offset;
debug!("print Begin -> push broken block at col {}", col);
self.print_stack.push(PrintStackElem {
offset: col,
pbreak: PrintStackBreak::Broken(b.breaks)
});
} else {
debug!("print Begin -> push fitting block");
self.print_stack.push(PrintStackElem {
offset: 0,
pbreak: PrintStackBreak::Fits
});
}
Ok(())
}
Token::End => {
debug!("print End -> pop End");
let print_stack = &mut self.print_stack;
assert!((print_stack.len()!= 0));
print_stack.pop().unwrap();
Ok(())
}
Token::Break(b) => {
let top = self.get_top();
match top.pbreak {
PrintStackBreak::Fits => {
debug!("print Break({}) in fitting block", b.blank_space);
|
{
debug!("pp String('{}')/buffer ~[{},{}]",
s, self.left, self.right);
self.advance_right();
self.token[self.right] = Token::String(s, len);
self.size[self.right] = len;
self.right_total += len;
self.check_stream()
}
|
conditional_block
|
ty.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A mini version of ast::Ty, which is easier to use, and features an
explicit `Self` type to use when specifying impls to be derived.
*/
pub use self::PtrTy::*;
pub use self::Ty::*;
use ast;
use ast::{Expr,Generics,Ident};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
use parse::token::special_idents;
use ptr::P;
/// The types of pointers
#[deriving(Clone)]
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
/// *mut
Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// for type parameters and a lifetime.
#[deriving(Clone)]
pub struct Path<'a> {
pub path: Vec<&'a str>,
pub lifetime: Option<&'a str>,
pub params: Vec<Box<Ty<'a>>>,
pub global: bool,
}
impl<'a> Path<'a> {
pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> {
Path::new_(path, None, Vec::new(), true)
}
pub fn new_local<'r>(path: &'r str) -> Path<'r> {
Path::new_(vec!( path ), None, Vec::new(), false)
}
pub fn new_<'r>(path: Vec<&'r str>,
lifetime: Option<&'r str>,
params: Vec<Box<Ty<'r>>>,
global: bool)
-> Path<'r> {
Path {
path: path,
lifetime: lifetime,
params: params,
global: global
}
}
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect();
let lt = mk_lifetimes(cx, span, &self.lifetime);
let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect();
cx.path_all(span, self.global, idents, lt, tys)
}
}
/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
Self,
/// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
/// mod::mod::Type<[lifetime], [Params...]>, including a plain type
/// parameter, and things like `int`
Literal(Path<'a>),
/// includes unit
Tuple(Vec<Ty<'a>> )
}
pub fn borrowed_ptrty<'r>() -> PtrTy<'r> {
Borrowed(None, ast::MutImmutable)
}
|
}
pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> {
Some(Some(borrowed_ptrty()))
}
pub fn borrowed_self<'r>() -> Ty<'r> {
borrowed(box Self)
}
pub fn nil_ty<'r>() -> Ty<'r> {
Tuple(Vec::new())
}
fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> {
match *lt {
Some(ref s) => Some(cx.lifetime(span, cx.ident_of(*s).name)),
None => None
}
}
fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> {
match *lt {
Some(ref s) => vec!(cx.lifetime(span, cx.ident_of(*s).name)),
None => vec!()
}
}
impl<'a> Ty<'a> {
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
match *self {
Ptr(ref ty, ref ptr) => {
let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = mk_lifetime(cx, span, lt);
cx.ty_rptr(span, raw_ty, lt, mutbl)
}
Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
}
}
Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
Self => {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
Tuple(ref fields) => {
let ty = ast::TyTup(fields.iter()
.map(|f| f.to_ty(cx, span, self_ty, self_generics))
.collect());
cx.ty(span, ty)
}
}
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
match *self {
Self => {
let self_params = self_generics.ty_params.map(|ty_param| {
cx.ty_ident(span, ty_param.ident)
});
let lifetimes = self_generics.lifetimes.iter()
.map(|d| d.lifetime)
.collect();
cx.path_all(span, false, vec!(self_ty), lifetimes,
self_params.into_vec())
}
Literal(ref p) => {
p.to_path(cx, span, self_ty, self_generics)
}
Ptr(..) => { cx.span_bug(span, "pointer in a path in generic `deriving`") }
Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `deriving`") }
}
}
}
fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
bounds: &[Path], unbound: Option<ast::TraitRef>,
self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
-> Generics {
Generics {
lifetimes: lifetimes,
ty_params: OwnedSlice::from_vec(ty_params),
where_clause: ast::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: Vec::new(),
},
}
}
/// Lifetimes and bounds on type parameters
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
pub fn empty() -> LifetimeBounds<'a> {
LifetimeBounds {
lifetimes: Vec::new(), bounds: Vec::new()
}
}
pub fn to_generics(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> Generics {
let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| {
let bounds =
bounds.iter().map(
|b| cx.lifetime(span, cx.ident_of(*b).name)).collect();
cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
&(ref name, ref unbound, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
unbound.clone(),
self_ty,
self_generics)
}
}
}).collect();
mk_generics(lifetimes, ty_params)
}
}
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
-> (P<Expr>, ast::ExplicitSelf) {
// this constructs a fresh `self` path, which will match the fresh `self` binding
// created below.
let self_path = cx.expr_self(span);
match *self_ptr {
None => {
(self_path, respan(span, ast::SelfValue(special_idents::self_)))
}
Some(ref ptr) => {
let self_ty = respan(
span,
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
ast::SelfRegion(lt, mutbl, special_idents::self_)
}
Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
});
let self_expr = cx.expr_deref(span, self_path);
(self_expr, self_ty)
}
}
}
|
pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> {
Ptr(ty, borrowed_ptrty())
|
random_line_split
|
ty.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A mini version of ast::Ty, which is easier to use, and features an
explicit `Self` type to use when specifying impls to be derived.
*/
pub use self::PtrTy::*;
pub use self::Ty::*;
use ast;
use ast::{Expr,Generics,Ident};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
use parse::token::special_idents;
use ptr::P;
/// The types of pointers
#[deriving(Clone)]
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
/// *mut
Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// for type parameters and a lifetime.
#[deriving(Clone)]
pub struct Path<'a> {
pub path: Vec<&'a str>,
pub lifetime: Option<&'a str>,
pub params: Vec<Box<Ty<'a>>>,
pub global: bool,
}
impl<'a> Path<'a> {
pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> {
Path::new_(path, None, Vec::new(), true)
}
pub fn new_local<'r>(path: &'r str) -> Path<'r> {
Path::new_(vec!( path ), None, Vec::new(), false)
}
pub fn new_<'r>(path: Vec<&'r str>,
lifetime: Option<&'r str>,
params: Vec<Box<Ty<'r>>>,
global: bool)
-> Path<'r> {
Path {
path: path,
lifetime: lifetime,
params: params,
global: global
}
}
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect();
let lt = mk_lifetimes(cx, span, &self.lifetime);
let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect();
cx.path_all(span, self.global, idents, lt, tys)
}
}
/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
Self,
/// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
/// mod::mod::Type<[lifetime], [Params...]>, including a plain type
/// parameter, and things like `int`
Literal(Path<'a>),
/// includes unit
Tuple(Vec<Ty<'a>> )
}
pub fn borrowed_ptrty<'r>() -> PtrTy<'r> {
Borrowed(None, ast::MutImmutable)
}
pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> {
Ptr(ty, borrowed_ptrty())
}
pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> {
Some(Some(borrowed_ptrty()))
}
pub fn borrowed_self<'r>() -> Ty<'r>
|
pub fn nil_ty<'r>() -> Ty<'r> {
Tuple(Vec::new())
}
fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> {
match *lt {
Some(ref s) => Some(cx.lifetime(span, cx.ident_of(*s).name)),
None => None
}
}
fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> {
match *lt {
Some(ref s) => vec!(cx.lifetime(span, cx.ident_of(*s).name)),
None => vec!()
}
}
impl<'a> Ty<'a> {
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
match *self {
Ptr(ref ty, ref ptr) => {
let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = mk_lifetime(cx, span, lt);
cx.ty_rptr(span, raw_ty, lt, mutbl)
}
Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
}
}
Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
Self => {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
Tuple(ref fields) => {
let ty = ast::TyTup(fields.iter()
.map(|f| f.to_ty(cx, span, self_ty, self_generics))
.collect());
cx.ty(span, ty)
}
}
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
match *self {
Self => {
let self_params = self_generics.ty_params.map(|ty_param| {
cx.ty_ident(span, ty_param.ident)
});
let lifetimes = self_generics.lifetimes.iter()
.map(|d| d.lifetime)
.collect();
cx.path_all(span, false, vec!(self_ty), lifetimes,
self_params.into_vec())
}
Literal(ref p) => {
p.to_path(cx, span, self_ty, self_generics)
}
Ptr(..) => { cx.span_bug(span, "pointer in a path in generic `deriving`") }
Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `deriving`") }
}
}
}
fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
bounds: &[Path], unbound: Option<ast::TraitRef>,
self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
-> Generics {
Generics {
lifetimes: lifetimes,
ty_params: OwnedSlice::from_vec(ty_params),
where_clause: ast::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: Vec::new(),
},
}
}
/// Lifetimes and bounds on type parameters
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
pub fn empty() -> LifetimeBounds<'a> {
LifetimeBounds {
lifetimes: Vec::new(), bounds: Vec::new()
}
}
pub fn to_generics(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> Generics {
let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| {
let bounds =
bounds.iter().map(
|b| cx.lifetime(span, cx.ident_of(*b).name)).collect();
cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
&(ref name, ref unbound, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
unbound.clone(),
self_ty,
self_generics)
}
}
}).collect();
mk_generics(lifetimes, ty_params)
}
}
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
-> (P<Expr>, ast::ExplicitSelf) {
// this constructs a fresh `self` path, which will match the fresh `self` binding
// created below.
let self_path = cx.expr_self(span);
match *self_ptr {
None => {
(self_path, respan(span, ast::SelfValue(special_idents::self_)))
}
Some(ref ptr) => {
let self_ty = respan(
span,
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
ast::SelfRegion(lt, mutbl, special_idents::self_)
}
Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
});
let self_expr = cx.expr_deref(span, self_path);
(self_expr, self_ty)
}
}
}
|
{
borrowed(box Self)
}
|
identifier_body
|
ty.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A mini version of ast::Ty, which is easier to use, and features an
explicit `Self` type to use when specifying impls to be derived.
*/
pub use self::PtrTy::*;
pub use self::Ty::*;
use ast;
use ast::{Expr,Generics,Ident};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
use parse::token::special_idents;
use ptr::P;
/// The types of pointers
#[deriving(Clone)]
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
/// *mut
Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// for type parameters and a lifetime.
#[deriving(Clone)]
pub struct Path<'a> {
pub path: Vec<&'a str>,
pub lifetime: Option<&'a str>,
pub params: Vec<Box<Ty<'a>>>,
pub global: bool,
}
impl<'a> Path<'a> {
pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> {
Path::new_(path, None, Vec::new(), true)
}
pub fn new_local<'r>(path: &'r str) -> Path<'r> {
Path::new_(vec!( path ), None, Vec::new(), false)
}
pub fn new_<'r>(path: Vec<&'r str>,
lifetime: Option<&'r str>,
params: Vec<Box<Ty<'r>>>,
global: bool)
-> Path<'r> {
Path {
path: path,
lifetime: lifetime,
params: params,
global: global
}
}
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect();
let lt = mk_lifetimes(cx, span, &self.lifetime);
let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect();
cx.path_all(span, self.global, idents, lt, tys)
}
}
/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
Self,
/// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
/// mod::mod::Type<[lifetime], [Params...]>, including a plain type
/// parameter, and things like `int`
Literal(Path<'a>),
/// includes unit
Tuple(Vec<Ty<'a>> )
}
pub fn borrowed_ptrty<'r>() -> PtrTy<'r> {
Borrowed(None, ast::MutImmutable)
}
pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> {
Ptr(ty, borrowed_ptrty())
}
pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> {
Some(Some(borrowed_ptrty()))
}
pub fn borrowed_self<'r>() -> Ty<'r> {
borrowed(box Self)
}
pub fn nil_ty<'r>() -> Ty<'r> {
Tuple(Vec::new())
}
fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> {
match *lt {
Some(ref s) => Some(cx.lifetime(span, cx.ident_of(*s).name)),
None => None
}
}
fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> {
match *lt {
Some(ref s) => vec!(cx.lifetime(span, cx.ident_of(*s).name)),
None => vec!()
}
}
impl<'a> Ty<'a> {
pub fn to_ty(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> P<ast::Ty> {
match *self {
Ptr(ref ty, ref ptr) => {
let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = mk_lifetime(cx, span, lt);
cx.ty_rptr(span, raw_ty, lt, mutbl)
}
Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
}
}
Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
Self => {
cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
}
Tuple(ref fields) => {
let ty = ast::TyTup(fields.iter()
.map(|f| f.to_ty(cx, span, self_ty, self_generics))
.collect());
cx.ty(span, ty)
}
}
}
pub fn to_path(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> ast::Path {
match *self {
Self => {
let self_params = self_generics.ty_params.map(|ty_param| {
cx.ty_ident(span, ty_param.ident)
});
let lifetimes = self_generics.lifetimes.iter()
.map(|d| d.lifetime)
.collect();
cx.path_all(span, false, vec!(self_ty), lifetimes,
self_params.into_vec())
}
Literal(ref p) => {
p.to_path(cx, span, self_ty, self_generics)
}
Ptr(..) =>
|
Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `deriving`") }
}
}
}
fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
bounds: &[Path], unbound: Option<ast::TraitRef>,
self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
-> Generics {
Generics {
lifetimes: lifetimes,
ty_params: OwnedSlice::from_vec(ty_params),
where_clause: ast::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: Vec::new(),
},
}
}
/// Lifetimes and bounds on type parameters
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
pub fn empty() -> LifetimeBounds<'a> {
LifetimeBounds {
lifetimes: Vec::new(), bounds: Vec::new()
}
}
pub fn to_generics(&self,
cx: &ExtCtxt,
span: Span,
self_ty: Ident,
self_generics: &Generics)
-> Generics {
let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| {
let bounds =
bounds.iter().map(
|b| cx.lifetime(span, cx.ident_of(*b).name)).collect();
cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
&(ref name, ref unbound, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
unbound.clone(),
self_ty,
self_generics)
}
}
}).collect();
mk_generics(lifetimes, ty_params)
}
}
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
-> (P<Expr>, ast::ExplicitSelf) {
// this constructs a fresh `self` path, which will match the fresh `self` binding
// created below.
let self_path = cx.expr_self(span);
match *self_ptr {
None => {
(self_path, respan(span, ast::SelfValue(special_idents::self_)))
}
Some(ref ptr) => {
let self_ty = respan(
span,
match *ptr {
Borrowed(ref lt, mutbl) => {
let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
ast::SelfRegion(lt, mutbl, special_idents::self_)
}
Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
});
let self_expr = cx.expr_deref(span, self_path);
(self_expr, self_ty)
}
}
}
|
{ cx.span_bug(span, "pointer in a path in generic `deriving`") }
|
conditional_block
|
ty.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
A mini version of ast::Ty, which is easier to use, and features an
explicit `Self` type to use when specifying impls to be derived.
*/
pub use self::PtrTy::*;
pub use self::Ty::*;
use ast;
use ast::{Expr,Generics,Ident};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use codemap::{Span,respan};
use owned_slice::OwnedSlice;
use parse::token::special_idents;
use ptr::P;
/// The types of pointers
#[deriving(Clone)]
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
/// *mut
Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// for type parameters and a lifetime.
#[deriving(Clone)]
pub struct Path<'a> {
    /// The path segments, e.g. `["std", "option", "Option"]`.
    pub path: Vec<&'a str>,
    /// An optional lifetime argument for the final segment.
    pub lifetime: Option<&'a str>,
    /// Type parameters for the final segment.
    pub params: Vec<Box<Ty<'a>>>,
    /// Whether the path is rooted at the crate root (leading `::`).
    pub global: bool,
}
impl<'a> Path<'a> {
    /// Creates a global path (leading `::`) with no lifetime or type params.
    pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> {
        Path::new_(path, None, Vec::new(), true)
    }
    /// Creates a single-segment, non-global path (a plain identifier).
    pub fn new_local<'r>(path: &'r str) -> Path<'r> {
        Path::new_(vec!( path ), None, Vec::new(), false)
    }
    /// Fully general constructor: segments, optional lifetime, type
    /// parameters, and globalness are all supplied by the caller.
    pub fn new_<'r>(path: Vec<&'r str>,
                    lifetime: Option<&'r str>,
                    params: Vec<Box<Ty<'r>>>,
                    global: bool)
                    -> Path<'r> {
        Path {
            path: path,
            lifetime: lifetime,
            params: params,
            global: global
        }
    }
    /// Lowers this path into an `ast::Ty` node (a path type).
    ///
    /// `self_ty`/`self_generics` identify the type the impl is being
    /// derived for, so nested `Self` types can be resolved.
    pub fn to_ty(&self,
                 cx: &ExtCtxt,
                 span: Span,
                 self_ty: Ident,
                 self_generics: &Generics)
                 -> P<ast::Ty> {
        cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
    }
    /// Lowers this path into an `ast::Path`, recursively lowering any
    /// type parameters (which may themselves contain `Self`).
    pub fn to_path(&self,
                   cx: &ExtCtxt,
                   span: Span,
                   self_ty: Ident,
                   self_generics: &Generics)
                   -> ast::Path {
        let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect();
        let lt = mk_lifetimes(cx, span, &self.lifetime);
        let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect();
        cx.path_all(span, self.global, idents, lt, tys)
    }
}
/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
    /// The type the impl is being derived for.
    Self,
    /// &/Box/ Ty
    Ptr(Box<Ty<'a>>, PtrTy<'a>),
    /// mod::mod::Type<[lifetime], [Params...]>, including a plain type
    /// parameter, and things like `int`
    Literal(Path<'a>),
    /// A tuple type; an empty vector denotes the unit type `()`.
    Tuple(Vec<Ty<'a>> )
}
/// An immutable borrow with an elided lifetime: `&`.
pub fn borrowed_ptrty<'r>() -> PtrTy<'r> {
    Borrowed(None, ast::MutImmutable)
}
/// Wraps `ty` in an immutable borrow: `&ty`.
pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> {
    Ptr(ty, borrowed_ptrty())
}
/// Explicit-self descriptor for methods taking `&self`.
/// The double `Option` mirrors the shape expected by method generation:
/// outer `Some` = explicit self present, inner = optional pointer kind.
pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> {
    Some(Some(borrowed_ptrty()))
}
/// The type `&Self`.
pub fn borrowed_self<'r>() -> Ty<'r> {
    borrowed(box Self)
}
pub fn
|
<'r>() -> Ty<'r> {
Tuple(Vec::new())
}
/// Builds the AST lifetime node for `lt`, or `None` when no lifetime
/// name was supplied.
fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> {
    lt.as_ref().map(|name| cx.lifetime(span, cx.ident_of(*name).name))
}
/// Like `mk_lifetime`, but produces the zero-or-one-element lifetime
/// list that `path_all` expects.
fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> {
    lt.as_ref()
      .map(|name| vec!(cx.lifetime(span, cx.ident_of(*name).name)))
      .unwrap_or_else(|| vec!())
}
impl<'a> Ty<'a> {
    /// Lowers this mini-type into a real `ast::Ty` node.
    ///
    /// `self_ty`/`self_generics` describe the type the impl is being
    /// derived for, so that `Self` can be expanded into the concrete
    /// type name with all of its lifetime and type parameters applied.
    pub fn to_ty(&self,
                 cx: &ExtCtxt,
                 span: Span,
                 self_ty: Ident,
                 self_generics: &Generics)
                 -> P<ast::Ty> {
        match *self {
            Ptr(ref ty, ref ptr) => {
                // Lower the pointee first, then wrap it in the pointer kind.
                let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
                match *ptr {
                    Borrowed(ref lt, mutbl) => {
                        let lt = mk_lifetime(cx, span, lt);
                        cx.ty_rptr(span, raw_ty, lt, mutbl)
                    }
                    Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
                }
            }
            Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
            // `Self` becomes a path type naming the deriving target.
            Self => {
                cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
            }
            Tuple(ref fields) => {
                // An empty field list yields the unit type `()`.
                let ty = ast::TyTup(fields.iter()
                    .map(|f| f.to_ty(cx, span, self_ty, self_generics))
                    .collect());
                cx.ty(span, ty)
            }
        }
    }
    /// Lowers this mini-type into an `ast::Path`. Only `Self` and
    /// `Literal` can appear in path position; pointers and tuples are
    /// compiler bugs if reached here.
    pub fn to_path(&self,
                   cx: &ExtCtxt,
                   span: Span,
                   self_ty: Ident,
                   self_generics: &Generics)
                   -> ast::Path {
        match *self {
            Self => {
                // Re-apply every generic parameter of the target type,
                // e.g. `Foo<'a, T>` for `impl<'a, T> ... for Foo<'a, T>`.
                let self_params = self_generics.ty_params.map(|ty_param| {
                    cx.ty_ident(span, ty_param.ident)
                });
                let lifetimes = self_generics.lifetimes.iter()
                    .map(|d| d.lifetime)
                    .collect();
                cx.path_all(span, false, vec!(self_ty), lifetimes,
                            self_params.into_vec())
            }
            Literal(ref p) => {
                p.to_path(cx, span, self_ty, self_generics)
            }
            Ptr(..) => { cx.span_bug(span, "pointer in a path in generic `deriving`") }
            Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `deriving`") }
        }
    }
}
/// Builds one `ast::TyParam` named `name`, lowering each bound path
/// (resolving any `Self` inside it) into a trait bound.
fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
               bounds: &[Path], unbound: Option<ast::TraitRef>,
               self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
    let bounds =
        bounds.iter().map(|b| {
            let path = b.to_path(cx, span, self_ident, self_generics);
            cx.typarambound(path)
        }).collect();
    cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
}
/// Assembles a `Generics` node from prebuilt lifetime and type-parameter
/// lists; the where-clause is always left empty.
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
               -> Generics {
    Generics {
        lifetimes: lifetimes,
        ty_params: OwnedSlice::from_vec(ty_params),
        where_clause: ast::WhereClause {
            id: ast::DUMMY_NODE_ID,
            predicates: Vec::new(),
        },
    }
}
/// Lifetimes and bounds on type parameters
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
    /// Each entry is a lifetime name plus the lifetimes it must outlive.
    pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
    /// Each entry is a type-parameter name, an optional unbound
    /// (e.g. `Sized?`), and its trait-bound paths.
    pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
    /// A bounds set with no lifetimes and no type parameters.
    pub fn empty() -> LifetimeBounds<'a> {
        LifetimeBounds {
            lifetimes: Vec::new(), bounds: Vec::new()
        }
    }
    /// Lowers the string-based description into a real `Generics` node,
    /// resolving `Self` in bound paths against `self_ty`/`self_generics`.
    pub fn to_generics(&self,
                       cx: &ExtCtxt,
                       span: Span,
                       self_ty: Ident,
                       self_generics: &Generics)
                       -> Generics {
        // Lifetime defs: each named lifetime carries its outlives-bounds.
        let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| {
            let bounds =
                bounds.iter().map(
                    |b| cx.lifetime(span, cx.ident_of(*b).name)).collect();
            cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
        }).collect();
        // Type params: delegate the per-parameter work to `mk_ty_param`.
        let ty_params = self.bounds.iter().map(|t| {
            match t {
                &(ref name, ref unbound, ref bounds) => {
                    mk_ty_param(cx,
                                span,
                                *name,
                                bounds.as_slice(),
                                unbound.clone(),
                                self_ty,
                                self_generics)
                }
            }
        }).collect();
        mk_generics(lifetimes, ty_params)
    }
}
/// Builds the `self` expression and the explicit-self AST node for a
/// derived method, from the optional pointer kind of its receiver.
///
/// Returns the expression to use for `self` in the method body (deref'd
/// once for by-reference receivers) plus the `ExplicitSelf` declaration.
/// Raw-pointer receivers are not supported and are a compiler bug.
pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>)
                         -> (P<Expr>, ast::ExplicitSelf) {
    // this constructs a fresh `self` path, which will match the fresh `self` binding
    // created below.
    let self_path = cx.expr_self(span);
    match *self_ptr {
        None => {
            // By-value receiver: `self`.
            (self_path, respan(span, ast::SelfValue(special_idents::self_)))
        }
        Some(ref ptr) => {
            let self_ty = respan(
                span,
                match *ptr {
                    Borrowed(ref lt, mutbl) => {
                        let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
                        ast::SelfRegion(lt, mutbl, special_idents::self_)
                    }
                    Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
                });
            // Deref once so the body works with the pointee.
            let self_expr = cx.expr_deref(span, self_path);
            (self_expr, self_ty)
        }
    }
}
|
nil_ty
|
identifier_name
|
utils.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utilities to assist with reading and writing Arrow data as Flight messages
use std::convert::TryFrom;
use crate::{FlightData, SchemaResult};
use arrow::datatypes::{Schema, SchemaRef};
use arrow::error::{ArrowError, Result};
use arrow::ipc::{convert, reader, writer, writer::IpcWriteOptions};
use arrow::record_batch::RecordBatch;
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_batch()`
impl From<&RecordBatch> for FlightData {
    // Thin wrapper: encode with the default IPC write options.
    fn from(batch: &RecordBatch) -> Self {
        flight_data_from_arrow_batch(batch, &IpcWriteOptions::default())
    }
}
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
pub fn flight_data_from_arrow_batch(
    batch: &RecordBatch,
    options: &IpcWriteOptions,
) -> FlightData {
    // Serialize the batch into an IPC message header plus a body buffer,
    // then wrap both in a FlightData with no descriptor or app metadata.
    let encoded = writer::record_batch_to_bytes(batch, options);
    FlightData {
        flight_descriptor: None,
        app_metadata: vec![],
        data_header: encoded.ipc_message,
        data_body: encoded.arrow_data,
    }
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_schema_from_arrow_schema()`
impl From<&Schema> for SchemaResult {
    // Thin wrapper: encode with the default IPC write options.
    fn from(schema: &Schema) -> Self {
        flight_schema_from_arrow_schema(schema, &IpcWriteOptions::default())
    }
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
pub fn flight_schema_from_arrow_schema(
    schema: &Schema,
    options: &IpcWriteOptions,
) -> SchemaResult {
    // Only the IPC message header is needed; a schema has no body bytes.
    let encoded = writer::schema_to_bytes(schema, options);
    SchemaResult {
        schema: encoded.ipc_message,
    }
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_schema()`
impl From<&Schema> for FlightData {
    // Thin wrapper: encode with the default IPC write options.
    fn from(schema: &Schema) -> Self {
        flight_data_from_arrow_schema(schema, &writer::IpcWriteOptions::default())
    }
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
pub fn flight_data_from_arrow_schema(
schema: &Schema,
options: &IpcWriteOptions,
|
FlightData {
flight_descriptor: None,
app_metadata: vec![],
data_header: schema.ipc_message,
data_body: vec![],
}
}
/// Try convert `FlightData` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&FlightData> for Schema {
    type Error = ArrowError;
    // Decode the IPC schema message from the data header; a `None` from
    // the converter means the header bytes were not a valid schema.
    fn try_from(data: &FlightData) -> Result<Self> {
        match convert::schema_from_bytes(&data.data_header[..]) {
            Some(schema) => Ok(schema),
            None => Err(ArrowError::ParseError(
                "Unable to convert flight data to Arrow schema".to_string(),
            )),
        }
    }
}
/// Try convert `SchemaResult` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&SchemaResult> for Schema {
    type Error = ArrowError;
    // Same decoding as the FlightData conversion, but the schema bytes
    // live in the `schema` field of a SchemaResult.
    fn try_from(data: &SchemaResult) -> Result<Self> {
        match convert::schema_from_bytes(&data.schema[..]) {
            Some(schema) => Ok(schema),
            None => Err(ArrowError::ParseError(
                "Unable to convert schema result to Arrow schema".to_string(),
            )),
        }
    }
}
/// Convert a FlightData message to a RecordBatch
// Returns `None` is never produced by the current body — both branches
// yield `Some`; the Option-of-Result shape is kept for API compatibility.
// NOTE(review): dictionary batches are not supported here — the
// dictionary list passed to the reader is always empty; confirm callers
// never send dictionary-encoded columns through this path.
pub fn flight_data_to_arrow_batch(
    data: &FlightData,
    schema: SchemaRef,
) -> Option<Result<RecordBatch>> {
    // check that the data_header is a record batch message
    let message = arrow::ipc::get_root_as_message(&data.data_header[..]);
    let dictionaries_by_field = Vec::new();
    message
        .header_as_record_batch()
        .ok_or_else(|| {
            ArrowError::ParseError(
                "Unable to convert flight data header to a record batch".to_string(),
            )
        })
        .map_or_else(
            // Header was not a record-batch message: surface the parse error.
            |err| Some(Err(err)),
            // Header parsed: decode the body bytes against the given schema.
            |batch| {
                Some(reader::read_record_batch(
                    &data.data_body,
                    batch,
                    schema,
                    &dictionaries_by_field,
                ))
            },
        )
}
// TODO: add more explicit conversion that exposes flight descriptor and metadata options
|
) -> FlightData {
let schema = writer::schema_to_bytes(schema, &options);
|
random_line_split
|
utils.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utilities to assist with reading and writing Arrow data as Flight messages
use std::convert::TryFrom;
use crate::{FlightData, SchemaResult};
use arrow::datatypes::{Schema, SchemaRef};
use arrow::error::{ArrowError, Result};
use arrow::ipc::{convert, reader, writer, writer::IpcWriteOptions};
use arrow::record_batch::RecordBatch;
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_batch()`
impl From<&RecordBatch> for FlightData {
fn from(batch: &RecordBatch) -> Self
|
}
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
pub fn flight_data_from_arrow_batch(
batch: &RecordBatch,
options: &IpcWriteOptions,
) -> FlightData {
let data = writer::record_batch_to_bytes(batch, &options);
FlightData {
flight_descriptor: None,
app_metadata: vec![],
data_header: data.ipc_message,
data_body: data.arrow_data,
}
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_schema_from_arrow_schema()`
impl From<&Schema> for SchemaResult {
fn from(schema: &Schema) -> Self {
let options = IpcWriteOptions::default();
flight_schema_from_arrow_schema(schema, &options)
}
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
pub fn flight_schema_from_arrow_schema(
schema: &Schema,
options: &IpcWriteOptions,
) -> SchemaResult {
SchemaResult {
schema: writer::schema_to_bytes(schema, &options).ipc_message,
}
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_schema()`
impl From<&Schema> for FlightData {
fn from(schema: &Schema) -> Self {
let options = writer::IpcWriteOptions::default();
flight_data_from_arrow_schema(schema, &options)
}
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
pub fn flight_data_from_arrow_schema(
schema: &Schema,
options: &IpcWriteOptions,
) -> FlightData {
let schema = writer::schema_to_bytes(schema, &options);
FlightData {
flight_descriptor: None,
app_metadata: vec![],
data_header: schema.ipc_message,
data_body: vec![],
}
}
/// Try convert `FlightData` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&FlightData> for Schema {
type Error = ArrowError;
fn try_from(data: &FlightData) -> Result<Self> {
convert::schema_from_bytes(&data.data_header[..]).ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert flight data to Arrow schema".to_string(),
)
})
}
}
/// Try convert `SchemaResult` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&SchemaResult> for Schema {
type Error = ArrowError;
fn try_from(data: &SchemaResult) -> Result<Self> {
convert::schema_from_bytes(&data.schema[..]).ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert schema result to Arrow schema".to_string(),
)
})
}
}
/// Convert a FlightData message to a RecordBatch
pub fn flight_data_to_arrow_batch(
data: &FlightData,
schema: SchemaRef,
) -> Option<Result<RecordBatch>> {
// check that the data_header is a record batch message
let message = arrow::ipc::get_root_as_message(&data.data_header[..]);
let dictionaries_by_field = Vec::new();
message
.header_as_record_batch()
.ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert flight data header to a record batch".to_string(),
)
})
.map_or_else(
|err| Some(Err(err)),
|batch| {
Some(reader::read_record_batch(
&data.data_body,
batch,
schema,
&dictionaries_by_field,
))
},
)
}
// TODO: add more explicit conversion that exposes flight descriptor and metadata options
|
{
let options = IpcWriteOptions::default();
flight_data_from_arrow_batch(batch, &options)
}
|
identifier_body
|
utils.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utilities to assist with reading and writing Arrow data as Flight messages
use std::convert::TryFrom;
use crate::{FlightData, SchemaResult};
use arrow::datatypes::{Schema, SchemaRef};
use arrow::error::{ArrowError, Result};
use arrow::ipc::{convert, reader, writer, writer::IpcWriteOptions};
use arrow::record_batch::RecordBatch;
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_batch()`
impl From<&RecordBatch> for FlightData {
fn from(batch: &RecordBatch) -> Self {
let options = IpcWriteOptions::default();
flight_data_from_arrow_batch(batch, &options)
}
}
/// Convert a `RecordBatch` to `FlightData` by converting the header and body to bytes
pub fn flight_data_from_arrow_batch(
batch: &RecordBatch,
options: &IpcWriteOptions,
) -> FlightData {
let data = writer::record_batch_to_bytes(batch, &options);
FlightData {
flight_descriptor: None,
app_metadata: vec![],
data_header: data.ipc_message,
data_body: data.arrow_data,
}
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_schema_from_arrow_schema()`
impl From<&Schema> for SchemaResult {
fn from(schema: &Schema) -> Self {
let options = IpcWriteOptions::default();
flight_schema_from_arrow_schema(schema, &options)
}
}
/// Convert a `Schema` to `SchemaResult` by converting to an IPC message
pub fn flight_schema_from_arrow_schema(
schema: &Schema,
options: &IpcWriteOptions,
) -> SchemaResult {
SchemaResult {
schema: writer::schema_to_bytes(schema, &options).ipc_message,
}
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
///
/// Note: This implicitly uses the default `IpcWriteOptions`. To configure options,
/// use `flight_data_from_arrow_schema()`
impl From<&Schema> for FlightData {
fn from(schema: &Schema) -> Self {
let options = writer::IpcWriteOptions::default();
flight_data_from_arrow_schema(schema, &options)
}
}
/// Convert a `Schema` to `FlightData` by converting to an IPC message
pub fn flight_data_from_arrow_schema(
    schema: &Schema,
    options: &IpcWriteOptions,
) -> FlightData {
    // A schema message carries no record data, so the body is empty and
    // only the IPC message header is populated.
    let encoded = writer::schema_to_bytes(schema, options);
    FlightData {
        flight_descriptor: None,
        app_metadata: vec![],
        data_header: encoded.ipc_message,
        data_body: vec![],
    }
}
/// Try convert `FlightData` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&FlightData> for Schema {
type Error = ArrowError;
fn
|
(data: &FlightData) -> Result<Self> {
convert::schema_from_bytes(&data.data_header[..]).ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert flight data to Arrow schema".to_string(),
)
})
}
}
/// Try convert `SchemaResult` into an Arrow Schema
///
/// Returns an error if the `FlightData` header is not a valid IPC schema
impl TryFrom<&SchemaResult> for Schema {
type Error = ArrowError;
fn try_from(data: &SchemaResult) -> Result<Self> {
convert::schema_from_bytes(&data.schema[..]).ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert schema result to Arrow schema".to_string(),
)
})
}
}
/// Convert a FlightData message to a RecordBatch
pub fn flight_data_to_arrow_batch(
data: &FlightData,
schema: SchemaRef,
) -> Option<Result<RecordBatch>> {
// check that the data_header is a record batch message
let message = arrow::ipc::get_root_as_message(&data.data_header[..]);
let dictionaries_by_field = Vec::new();
message
.header_as_record_batch()
.ok_or_else(|| {
ArrowError::ParseError(
"Unable to convert flight data header to a record batch".to_string(),
)
})
.map_or_else(
|err| Some(Err(err)),
|batch| {
Some(reader::read_record_batch(
&data.data_body,
batch,
schema,
&dictionaries_by_field,
))
},
)
}
// TODO: add more explicit conversion that exposes flight descriptor and metadata options
|
try_from
|
identifier_name
|
mat.rs
|
use num::Float;
use std::fmt;
use std::ops::{Add, Sub, Mul};
use numvec::Vec3f;
/// The type of matrix elements.
pub type Scalar = f32;
/// A 4x4 matrix type stored in column-major order for interoperability with
/// OpenGL.
///
/// Supports the creation of isometries and projections in homogenous
/// coordinates. In terms of operations, only transposition and multiplication
/// are currently supported (and not super-efficiently implemented).
///
/// _Note:_ The 16 elements are stored in place, so copies are not cheap.
#[repr(packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct Mat4 {
data: [Scalar; 16]
}
impl Mat4 {
pub fn new(m00: Scalar, m01: Scalar, m02: Scalar, m03: Scalar,
m10: Scalar, m11: Scalar, m12: Scalar, m13: Scalar,
m20: Scalar, m21: Scalar, m22: Scalar, m23: Scalar,
m30: Scalar, m31: Scalar, m32: Scalar, m33: Scalar) -> Mat4 {
// In my mind vectors are columns, hence matrices need to be transposed
// to the OpenGL memory order.
Mat4 { data: [m00, m10, m20, m30, m01, m11, m21, m31,
m02, m12, m22, m32, m03, m13, m23, m33] }
}
pub fn new_identity() -> Mat4 {
Mat4::new(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a perspective projection matrix from.
///
/// The parameters are:
/// * `fov_degrees` - Horizontal field of view.
/// * `aspect_ratio` - Ratio between width and height of the view.
/// * `near`, `far` - The Z coordinate of the near and far planes.
pub fn new_perspective(fov_degrees: Scalar, aspect_ratio: Scalar,
near: Scalar, far: Scalar) -> Mat4 {
let fov = (3.1415926538 * fov_degrees) / 180.0;
let f = 1.0 / (fov * 0.5).tan();
Mat4::new(
f / aspect_ratio, 0.0, 0.0, 0.0,
0.0, f, 0.0, 0.0,
0.0, 0.0, (far + near) / (near - far), 2.0*far*near / (near - far),
0.0, 0.0, -1.0, 0.0)
}
/// Creates a matrix which rotates points by `angle_radians` around `axis`.
pub fn new_axis_rotation(axis: &Vec3f, angle_radians: Scalar) -> Mat4 {
let ca = angle_radians.cos();
let sa = angle_radians.sin();
let nca = 1.0 - ca;
let u = axis;
Mat4::new(
ca + u.x*u.x*nca, u.x*u.y*nca - u.z*sa, u.x*u.z*nca + u.y*sa, 0.0,
u.y*u.x*nca + u.z*sa, ca + u.y*u.y*nca, u.y*u.z*nca - u.x*sa, 0.0,
u.z*u.x*nca - u.y*sa, u.z*u.y*nca + u.x*sa, ca + u.z*u.z*nca, 0.0,
0.0, 0.0, 0.0, 1.0
)
}
/// Creates a rotation matrix from the three _Euler angles_.
pub fn new_euler_rotation(yaw: f32, pitch: f32, roll: f32) -> Mat4 {
let (ca, sa) = (pitch.cos(), pitch.sin());
let (cb, sb) = (yaw.cos(), yaw.sin());
let (cc, sc) = (roll.cos(), roll.sin());
Mat4::new(
cb * cc, -cb * sc, sb, 0.0,
sa * sb * cc + ca * sc, -sa * sb * sc + ca * cc, -sa * cb, 0.0,
-ca * sb * cc + sa * sc, ca * sb * sc + sa * cc, ca * cb, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a translation matrix which maps points `p` to `p + by`.
pub fn new_translation(by: Vec3f) -> Mat4
|
/// Returns the transpose of the matrix (columns swapped with rows).
pub fn transposed(&self) -> Mat4 {
let m = &self.data;
// m is in column-major order, so calling with new in row-order will
// transpose it.
Mat4::new(m[0], m[1], m[2], m[3],
m[4], m[5], m[6], m[7],
m[8], m[9], m[10], m[11],
m[12], m[13], m[14], m[15])
}
pub fn get(&self, row: usize, column: usize) -> Scalar {
self.data[column * 4 + row]
}
pub fn as_scalar_ptr(&self) -> *const Scalar {
self.data.as_ptr()
}
pub fn approx_eq(&self, rhs: &Mat4, tol: Scalar) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| (x - y).abs() <= tol)
}
}
impl fmt::Debug for Mat4 {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter,
"[{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e}]",
self.get(0, 0), self.get(0, 1), self.get(0, 2), self.get(0, 3),
self.get(1, 0), self.get(1, 1), self.get(1, 2), self.get(1, 3),
self.get(2, 0), self.get(2, 1), self.get(2, 2), self.get(2, 3),
self.get(3, 0), self.get(3, 1), self.get(3, 2), self.get(3, 3))
}
}
impl<'a, 'b> Mul<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn mul(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] * r[0] + l[4] * r[1] + l[8] * r[2] + l[12] * r[3],
l[1] * r[0] + l[5] * r[1] + l[9] * r[2] + l[13] * r[3],
l[2] * r[0] + l[6] * r[1] + l[10] * r[2] + l[14] * r[3],
l[3] * r[0] + l[7] * r[1] + l[11] * r[2] + l[15] * r[3],
l[0] * r[4] + l[4] * r[5] + l[8] * r[6] + l[12] * r[7],
l[1] * r[4] + l[5] * r[5] + l[9] * r[6] + l[13] * r[7],
l[2] * r[4] + l[6] * r[5] + l[10] * r[6] + l[14] * r[7],
l[3] * r[4] + l[7] * r[5] + l[11] * r[6] + l[15] * r[7],
l[0] * r[8] + l[4] * r[9] + l[ 8] * r[10] + l[12] * r[11],
l[1] * r[8] + l[5] * r[9] + l[ 9] * r[10] + l[13] * r[11],
l[2] * r[8] + l[6] * r[9] + l[10] * r[10] + l[14] * r[11],
l[3] * r[8] + l[7] * r[9] + l[11] * r[10] + l[15] * r[11],
l[0] * r[12] + l[4] * r[13] + l[ 8] * r[14] + l[12] * r[15],
l[1] * r[12] + l[5] * r[13] + l[ 9] * r[14] + l[13] * r[15],
l[2] * r[12] + l[6] * r[13] + l[10] * r[14] + l[14] * r[15],
l[3] * r[12] + l[7] * r[13] + l[11] * r[14] + l[15] * r[15]],
}
}
}
impl<'a, 'b> Add<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn add(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] + r[0], l[1] + r[1], l[2] + r[2], l[3] + r[3],
l[4] + r[4], l[5] + r[5], l[6] + r[6], l[7] + r[7],
l[8] + r[8], l[9] + r[9], l[10] + r[10], l[11] + r[11],
l[12] + r[12], l[13] + r[13], l[14] + r[14], l[15] + r[15]],
}
}
}
impl<'a, 'b> Sub<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn sub(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] - r[0], l[1] - r[1], l[2] - r[2], l[3] - r[3],
l[4] - r[4], l[5] - r[5], l[6] - r[6], l[7] - r[7],
l[8] - r[8], l[9] - r[9], l[10] - r[10], l[11] - r[11],
l[12] - r[12], l[13] - r[13], l[14] - r[14], l[15] - r[15]],
}
}
}
impl Mul<Mat4> for Mat4 {
type Output = Mat4;
fn mul(self, rhs: Mat4) -> Mat4 { &self * &rhs }
}
impl Add<Mat4> for Mat4 {
type Output = Mat4;
fn add(self, rhs: Mat4) -> Mat4 { &self + &rhs }
}
impl Sub<Mat4> for Mat4 {
type Output = Mat4;
fn sub(self, rhs: Mat4) -> Mat4 { &self - &rhs }
}
impl PartialEq for Mat4 {
fn eq(&self, rhs: &Mat4) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| x == y)
}
}
#[cfg(test)]
mod test {
use super::Mat4;
#[test]
fn test_mul() {
let a = Mat4::new(4.0, 8.0, 1.0, 6.0,
9.0, 4.0, 2.0, 1.0,
4.0, 3.0, 9.0, 3.0,
2.0, 4.0, 9.0, 4.0);
let b = Mat4::new(8.0, 6.0, 5.0, 7.0,
1.0, 7.0, 3.0, 2.0,
1.0, 6.0, 7.0, 4.0,
2.0, 5.0, 2.0, 6.0);
let exp_ab = Mat4::new(53.0, 116.0, 63.0, 84.0,
80.0, 99.0, 73.0, 85.0,
50.0, 114.0, 98.0, 88.0,
37.0, 114.0, 93.0, 82.0);
assert_eq!(exp_ab, a * b);
}
}
|
{
Mat4::new(1.0, 0.0, 0.0, by.x,
0.0, 1.0, 0.0, by.y,
0.0, 0.0, 1.0, by.z,
0.0, 0.0, 0.0, 1.0)
}
|
identifier_body
|
mat.rs
|
use num::Float;
use std::fmt;
use std::ops::{Add, Sub, Mul};
use numvec::Vec3f;
/// The type of matrix elements.
pub type Scalar = f32;
/// A 4x4 matrix type stored in column-major order for interoperability with
/// OpenGL.
///
/// Supports the creation of isometries and projections in homogenous
/// coordinates. In terms of operations, only transposition and multiplication
/// are currently supported (and not super-efficiently implemented).
///
/// _Note:_ The 16 elements are stored in place, so copies are not cheap.
#[repr(packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct Mat4 {
    // Elements stored contiguously in column-major (OpenGL) order.
    // NOTE(review): `#[repr(packed)]` forces byte alignment, and several
    // methods take references into `data` — this relies on unaligned f32
    // access being tolerated; confirm `packed` is actually required here,
    // since `#[repr(C)]` alone already gives a contiguous [f32; 16].
    data: [Scalar; 16]
}
impl Mat4 {
    /// Creates a matrix from elements given in row-major order (as written
    /// on paper); they are transposed into the column-major storage order.
    pub fn new(m00: Scalar, m01: Scalar, m02: Scalar, m03: Scalar,
               m10: Scalar, m11: Scalar, m12: Scalar, m13: Scalar,
               m20: Scalar, m21: Scalar, m22: Scalar, m23: Scalar,
               m30: Scalar, m31: Scalar, m32: Scalar, m33: Scalar) -> Mat4 {
        // In my mind vectors are columns, hence matrices need to be transposed
        // to the OpenGL memory order.
        Mat4 { data: [m00, m10, m20, m30, m01, m11, m21, m31,
                      m02, m12, m22, m32, m03, m13, m23, m33] }
    }
    /// The 4x4 identity matrix.
    pub fn new_identity() -> Mat4 {
        Mat4::new(1.0, 0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0, 0.0,
                  0.0, 0.0, 1.0, 0.0,
                  0.0, 0.0, 0.0, 1.0)
    }
    /// Creates a perspective projection matrix.
    ///
    /// The parameters are:
    /// * `fov_degrees` - Horizontal field of view.
    /// * `aspect_ratio` - Ratio between width and height of the view.
    /// * `near`, `far` - The Z coordinate of the near and far planes.
    pub fn new_perspective(fov_degrees: Scalar, aspect_ratio: Scalar,
                           near: Scalar, far: Scalar) -> Mat4 {
        // Use the library constant rather than a hand-typed approximation
        // of pi (the old literal rounded to the same f32 value).
        let fov = (std::f32::consts::PI * fov_degrees) / 180.0;
        let f = 1.0 / (fov * 0.5).tan();
        Mat4::new(
            f / aspect_ratio, 0.0, 0.0, 0.0,
            0.0, f, 0.0, 0.0,
            0.0, 0.0, (far + near) / (near - far), 2.0*far*near / (near - far),
            0.0, 0.0, -1.0, 0.0)
    }
    /// Creates a matrix which rotates points by `angle_radians` around `axis`.
    /// (Rodrigues' rotation formula; `axis` is assumed to be unit length —
    /// the formula is only a pure rotation for normalized axes.)
    pub fn new_axis_rotation(axis: &Vec3f, angle_radians: Scalar) -> Mat4 {
        let ca = angle_radians.cos();
        let sa = angle_radians.sin();
        let nca = 1.0 - ca;  // 1 - cos(angle), shared by all off-diagonal terms
        let u = axis;
        Mat4::new(
            ca + u.x*u.x*nca, u.x*u.y*nca - u.z*sa, u.x*u.z*nca + u.y*sa, 0.0,
            u.y*u.x*nca + u.z*sa, ca + u.y*u.y*nca, u.y*u.z*nca - u.x*sa, 0.0,
            u.z*u.x*nca - u.y*sa, u.z*u.y*nca + u.x*sa, ca + u.z*u.z*nca, 0.0,
            0.0, 0.0, 0.0, 1.0
        )
    }
    /// Creates a rotation matrix from the three _Euler angles_.
    pub fn new_euler_rotation(yaw: f32, pitch: f32, roll: f32) -> Mat4 {
        let (ca, sa) = (pitch.cos(), pitch.sin());
        let (cb, sb) = (yaw.cos(), yaw.sin());
        let (cc, sc) = (roll.cos(), roll.sin());
        Mat4::new(
            cb * cc, -cb * sc, sb, 0.0,
            sa * sb * cc + ca * sc, -sa * sb * sc + ca * cc, -sa * cb, 0.0,
            -ca * sb * cc + sa * sc, ca * sb * sc + sa * cc, ca * cb, 0.0,
            0.0, 0.0, 0.0, 1.0)
    }
    /// Creates a translation matrix which maps points `p` to `p + by`.
    pub fn new_translation(by: Vec3f) -> Mat4 {
        Mat4::new(1.0, 0.0, 0.0, by.x,
                  0.0, 1.0, 0.0, by.y,
                  0.0, 0.0, 1.0, by.z,
                  0.0, 0.0, 0.0, 1.0)
    }
    /// Returns the transpose of the matrix (columns swapped with rows).
    pub fn transposed(&self) -> Mat4 {
        let m = &self.data;
        // m is in column-major order, so calling with new in row-order will
        // transpose it.
        Mat4::new(m[0], m[1], m[2], m[3],
                  m[4], m[5], m[6], m[7],
                  m[8], m[9], m[10], m[11],
                  m[12], m[13], m[14], m[15])
    }
    /// Returns the element at (`row`, `column`), 0-indexed.
    pub fn get(&self, row: usize, column: usize) -> Scalar {
        // Column-major storage: columns are contiguous runs of 4.
        self.data[column * 4 + row]
    }
    /// Raw pointer to the 16 elements, suitable for passing to OpenGL.
    pub fn as_scalar_ptr(&self) -> *const Scalar {
        self.data.as_ptr()
    }
    /// Element-wise approximate equality with absolute tolerance `tol`.
    pub fn approx_eq(&self, rhs: &Mat4, tol: Scalar) -> bool {
        self.data.iter().zip(rhs.data.iter()).all(|(x, y)| (x - y).abs() <= tol)
    }
}
impl fmt::Debug for Mat4 {
    /// Prints the matrix in row-major, MATLAB-like layout
    /// (`[row; row; row; row]`), one row per line, in scientific notation.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter,
               "[{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
                 {:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
                 {:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
                 {:10.3e} {:10.3e} {:10.3e} {:10.3e}]",
               self.get(0, 0), self.get(0, 1), self.get(0, 2), self.get(0, 3),
               self.get(1, 0), self.get(1, 1), self.get(1, 2), self.get(1, 3),
               self.get(2, 0), self.get(2, 1), self.get(2, 2), self.get(2, 3),
               self.get(3, 0), self.get(3, 1), self.get(3, 2), self.get(3, 3))
    }
}
impl<'a, 'b> Mul<&'a Mat4> for &'b Mat4 {
    type Output = Mat4;
    /// Matrix product `self * rhs`, fully unrolled.
    ///
    /// Both operands are column-major, so result column `j` element `i` is
    /// `sum_k l[k*4 + i] * r[j*4 + k]` — each group of four lines below
    /// computes one output column.
    fn mul(self, rhs: &'a Mat4) -> Mat4 {
        let l = &self.data;
        let r = &rhs.data;
        Mat4 {
            // Column 0
            data: [l[0] * r[0] + l[4] * r[1] + l[8] * r[2] + l[12] * r[3],
                   l[1] * r[0] + l[5] * r[1] + l[9] * r[2] + l[13] * r[3],
                   l[2] * r[0] + l[6] * r[1] + l[10] * r[2] + l[14] * r[3],
                   l[3] * r[0] + l[7] * r[1] + l[11] * r[2] + l[15] * r[3],
                   // Column 1
                   l[0] * r[4] + l[4] * r[5] + l[8] * r[6] + l[12] * r[7],
                   l[1] * r[4] + l[5] * r[5] + l[9] * r[6] + l[13] * r[7],
                   l[2] * r[4] + l[6] * r[5] + l[10] * r[6] + l[14] * r[7],
                   l[3] * r[4] + l[7] * r[5] + l[11] * r[6] + l[15] * r[7],
                   // Column 2
                   l[0] * r[8] + l[4] * r[9] + l[ 8] * r[10] + l[12] * r[11],
                   l[1] * r[8] + l[5] * r[9] + l[ 9] * r[10] + l[13] * r[11],
                   l[2] * r[8] + l[6] * r[9] + l[10] * r[10] + l[14] * r[11],
                   l[3] * r[8] + l[7] * r[9] + l[11] * r[10] + l[15] * r[11],
                   // Column 3
                   l[0] * r[12] + l[4] * r[13] + l[ 8] * r[14] + l[12] * r[15],
                   l[1] * r[12] + l[5] * r[13] + l[ 9] * r[14] + l[13] * r[15],
                   l[2] * r[12] + l[6] * r[13] + l[10] * r[14] + l[14] * r[15],
                   l[3] * r[12] + l[7] * r[13] + l[11] * r[14] + l[15] * r[15]],
        }
    }
}
impl<'a, 'b> Add<&'a Mat4> for &'b Mat4 {
    type Output = Mat4;
    /// Element-wise sum of the two matrices.
    fn add(self, rhs: &'a Mat4) -> Mat4 {
        // Copy the packed fields out by value, then add element by element.
        let (l, r) = (self.data, rhs.data);
        let mut sum = [0.0; 16];
        for i in 0..16 {
            sum[i] = l[i] + r[i];
        }
        Mat4 { data: sum }
    }
}
impl<'a, 'b> Sub<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn sub(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
|
l[12] - r[12], l[13] - r[13], l[14] - r[14], l[15] - r[15]],
}
}
}
// By-value operator impls: thin wrappers that delegate to the borrowed
// implementations above. They consume both operands (the 16-element arrays
// are copied, so this is not free — see the struct-level note).
impl Mul<Mat4> for Mat4 {
    type Output = Mat4;
    fn mul(self, rhs: Mat4) -> Mat4 { &self * &rhs }
}
impl Add<Mat4> for Mat4 {
    type Output = Mat4;
    fn add(self, rhs: Mat4) -> Mat4 { &self + &rhs }
}
impl Sub<Mat4> for Mat4 {
    type Output = Mat4;
    fn sub(self, rhs: Mat4) -> Mat4 { &self - &rhs }
}
impl PartialEq for Mat4 {
    /// Exact element-wise equality. For floating-point work `approx_eq`
    /// (tolerance-based) is usually the better choice.
    fn eq(&self, rhs: &Mat4) -> bool {
        // Copy the element arrays out and compare them directly; array
        // equality is itself element-wise.
        let (lhs, other) = (self.data, rhs.data);
        lhs == other
    }
}
#[cfg(test)]
mod test {
use super::Mat4;
#[test]
fn test_mul() {
let a = Mat4::new(4.0, 8.0, 1.0, 6.0,
9.0, 4.0, 2.0, 1.0,
4.0, 3.0, 9.0, 3.0,
2.0, 4.0, 9.0, 4.0);
let b = Mat4::new(8.0, 6.0, 5.0, 7.0,
1.0, 7.0, 3.0, 2.0,
1.0, 6.0, 7.0, 4.0,
2.0, 5.0, 2.0, 6.0);
let exp_ab = Mat4::new(53.0, 116.0, 63.0, 84.0,
80.0, 99.0, 73.0, 85.0,
50.0, 114.0, 98.0, 88.0,
37.0, 114.0, 93.0, 82.0);
assert_eq!(exp_ab, a * b);
}
}
|
Mat4 {
data: [l[0] - r[0], l[1] - r[1], l[2] - r[2], l[3] - r[3],
l[4] - r[4], l[5] - r[5], l[6] - r[6], l[7] - r[7],
l[8] - r[8], l[9] - r[9], l[10] - r[10], l[11] - r[11],
|
random_line_split
|
mat.rs
|
use num::Float;
use std::fmt;
use std::ops::{Add, Sub, Mul};
use numvec::Vec3f;
/// The type of matrix elements.
pub type Scalar = f32;
/// A 4x4 matrix type stored in column-major order for interoperability with
/// OpenGL.
///
/// Supports the creation of isometries and projections in homogenous
/// coordinates. In terms of operations, only transposition and multiplication
/// are currently supported (and not super-efficiently implemented).
///
/// _Note:_ The 16 elements are stored in place, so copies are not cheap.
#[repr(packed)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct Mat4 {
data: [Scalar; 16]
}
impl Mat4 {
pub fn new(m00: Scalar, m01: Scalar, m02: Scalar, m03: Scalar,
m10: Scalar, m11: Scalar, m12: Scalar, m13: Scalar,
m20: Scalar, m21: Scalar, m22: Scalar, m23: Scalar,
m30: Scalar, m31: Scalar, m32: Scalar, m33: Scalar) -> Mat4 {
// In my mind vectors are columns, hence matrices need to be transposed
// to the OpenGL memory order.
Mat4 { data: [m00, m10, m20, m30, m01, m11, m21, m31,
m02, m12, m22, m32, m03, m13, m23, m33] }
}
pub fn new_identity() -> Mat4 {
Mat4::new(1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a perspective projection matrix from.
///
/// The parameters are:
/// * `fov_degrees` - Horizontal field of view.
/// * `aspect_ratio` - Ratio between width and height of the view.
/// * `near`, `far` - The Z coordinate of the near and far planes.
pub fn new_perspective(fov_degrees: Scalar, aspect_ratio: Scalar,
near: Scalar, far: Scalar) -> Mat4 {
let fov = (3.1415926538 * fov_degrees) / 180.0;
let f = 1.0 / (fov * 0.5).tan();
Mat4::new(
f / aspect_ratio, 0.0, 0.0, 0.0,
0.0, f, 0.0, 0.0,
0.0, 0.0, (far + near) / (near - far), 2.0*far*near / (near - far),
0.0, 0.0, -1.0, 0.0)
}
/// Creates a matrix which rotates points by `angle_radians` around `axis`.
pub fn
|
(axis: &Vec3f, angle_radians: Scalar) -> Mat4 {
let ca = angle_radians.cos();
let sa = angle_radians.sin();
let nca = 1.0 - ca;
let u = axis;
Mat4::new(
ca + u.x*u.x*nca, u.x*u.y*nca - u.z*sa, u.x*u.z*nca + u.y*sa, 0.0,
u.y*u.x*nca + u.z*sa, ca + u.y*u.y*nca, u.y*u.z*nca - u.x*sa, 0.0,
u.z*u.x*nca - u.y*sa, u.z*u.y*nca + u.x*sa, ca + u.z*u.z*nca, 0.0,
0.0, 0.0, 0.0, 1.0
)
}
/// Creates a rotation matrix from the three _Euler angles_.
pub fn new_euler_rotation(yaw: f32, pitch: f32, roll: f32) -> Mat4 {
let (ca, sa) = (pitch.cos(), pitch.sin());
let (cb, sb) = (yaw.cos(), yaw.sin());
let (cc, sc) = (roll.cos(), roll.sin());
Mat4::new(
cb * cc, -cb * sc, sb, 0.0,
sa * sb * cc + ca * sc, -sa * sb * sc + ca * cc, -sa * cb, 0.0,
-ca * sb * cc + sa * sc, ca * sb * sc + sa * cc, ca * cb, 0.0,
0.0, 0.0, 0.0, 1.0)
}
/// Creates a translation matrix which maps points `p` to `p + by`.
pub fn new_translation(by: Vec3f) -> Mat4 {
Mat4::new(1.0, 0.0, 0.0, by.x,
0.0, 1.0, 0.0, by.y,
0.0, 0.0, 1.0, by.z,
0.0, 0.0, 0.0, 1.0)
}
/// Returns the transpose of the matrix (columns swapped with rows).
pub fn transposed(&self) -> Mat4 {
let m = &self.data;
// m is in column-major order, so calling with new in row-order will
// transpose it.
Mat4::new(m[0], m[1], m[2], m[3],
m[4], m[5], m[6], m[7],
m[8], m[9], m[10], m[11],
m[12], m[13], m[14], m[15])
}
pub fn get(&self, row: usize, column: usize) -> Scalar {
self.data[column * 4 + row]
}
pub fn as_scalar_ptr(&self) -> *const Scalar {
self.data.as_ptr()
}
pub fn approx_eq(&self, rhs: &Mat4, tol: Scalar) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| (x - y).abs() <= tol)
}
}
impl fmt::Debug for Mat4 {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter,
"[{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e};\n\
{:10.3e} {:10.3e} {:10.3e} {:10.3e}]",
self.get(0, 0), self.get(0, 1), self.get(0, 2), self.get(0, 3),
self.get(1, 0), self.get(1, 1), self.get(1, 2), self.get(1, 3),
self.get(2, 0), self.get(2, 1), self.get(2, 2), self.get(2, 3),
self.get(3, 0), self.get(3, 1), self.get(3, 2), self.get(3, 3))
}
}
impl<'a, 'b> Mul<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn mul(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] * r[0] + l[4] * r[1] + l[8] * r[2] + l[12] * r[3],
l[1] * r[0] + l[5] * r[1] + l[9] * r[2] + l[13] * r[3],
l[2] * r[0] + l[6] * r[1] + l[10] * r[2] + l[14] * r[3],
l[3] * r[0] + l[7] * r[1] + l[11] * r[2] + l[15] * r[3],
l[0] * r[4] + l[4] * r[5] + l[8] * r[6] + l[12] * r[7],
l[1] * r[4] + l[5] * r[5] + l[9] * r[6] + l[13] * r[7],
l[2] * r[4] + l[6] * r[5] + l[10] * r[6] + l[14] * r[7],
l[3] * r[4] + l[7] * r[5] + l[11] * r[6] + l[15] * r[7],
l[0] * r[8] + l[4] * r[9] + l[ 8] * r[10] + l[12] * r[11],
l[1] * r[8] + l[5] * r[9] + l[ 9] * r[10] + l[13] * r[11],
l[2] * r[8] + l[6] * r[9] + l[10] * r[10] + l[14] * r[11],
l[3] * r[8] + l[7] * r[9] + l[11] * r[10] + l[15] * r[11],
l[0] * r[12] + l[4] * r[13] + l[ 8] * r[14] + l[12] * r[15],
l[1] * r[12] + l[5] * r[13] + l[ 9] * r[14] + l[13] * r[15],
l[2] * r[12] + l[6] * r[13] + l[10] * r[14] + l[14] * r[15],
l[3] * r[12] + l[7] * r[13] + l[11] * r[14] + l[15] * r[15]],
}
}
}
impl<'a, 'b> Add<&'a Mat4> for &'b Mat4 {
type Output = Mat4;
fn add(self, rhs: &'a Mat4) -> Mat4 {
let l = &self.data;
let r = &rhs.data;
Mat4 {
data: [l[0] + r[0], l[1] + r[1], l[2] + r[2], l[3] + r[3],
l[4] + r[4], l[5] + r[5], l[6] + r[6], l[7] + r[7],
l[8] + r[8], l[9] + r[9], l[10] + r[10], l[11] + r[11],
l[12] + r[12], l[13] + r[13], l[14] + r[14], l[15] + r[15]],
}
}
}
/// Component-wise difference of two borrowed matrices.
impl<'a, 'b> Sub<&'a Mat4> for &'b Mat4 {
    type Output = Mat4;
    fn sub(self, rhs: &'a Mat4) -> Mat4 {
        // Copy both element arrays out of the structs, then subtract the
        // right operand from the left one element by element.
        let mut diff = self.data;
        let other = rhs.data;
        for (minuend, subtrahend) in diff.iter_mut().zip(other.iter()) {
            *minuend -= *subtrahend;
        }
        Mat4 { data: diff }
    }
}
impl Mul<Mat4> for Mat4 {
type Output = Mat4;
fn mul(self, rhs: Mat4) -> Mat4 { &self * &rhs }
}
impl Add<Mat4> for Mat4 {
type Output = Mat4;
fn add(self, rhs: Mat4) -> Mat4 { &self + &rhs }
}
impl Sub<Mat4> for Mat4 {
type Output = Mat4;
fn sub(self, rhs: Mat4) -> Mat4 { &self - &rhs }
}
impl PartialEq for Mat4 {
fn eq(&self, rhs: &Mat4) -> bool {
self.data.iter().zip(rhs.data.iter()).all(|(x, y)| x == y)
}
}
#[cfg(test)]
mod test {
use super::Mat4;
#[test]
fn test_mul() {
let a = Mat4::new(4.0, 8.0, 1.0, 6.0,
9.0, 4.0, 2.0, 1.0,
4.0, 3.0, 9.0, 3.0,
2.0, 4.0, 9.0, 4.0);
let b = Mat4::new(8.0, 6.0, 5.0, 7.0,
1.0, 7.0, 3.0, 2.0,
1.0, 6.0, 7.0, 4.0,
2.0, 5.0, 2.0, 6.0);
let exp_ab = Mat4::new(53.0, 116.0, 63.0, 84.0,
80.0, 99.0, 73.0, 85.0,
50.0, 114.0, 98.0, 88.0,
37.0, 114.0, 93.0, 82.0);
assert_eq!(exp_ab, a * b);
}
}
|
new_axis_rotation
|
identifier_name
|
pager.rs
|
// SPDX-License-Identifier: Unlicense
//! Interface for paging functions.
use crate::pager::{
Attributes, FixedOffset, FrameAllocator, PhysAddr, PhysAddrRange, Translate, VirtAddr,
VirtAddrRange,
};
use crate::util::locked::Locked;
use crate::Result;
use core::any::Any;
/// Each architecture must supply the following entry points for paging..
pub trait PagerTrait {
/// Physical address range of ram
fn ram_range() -> PhysAddrRange;
/// Base virtual address of kernel address space
fn kernel_base() -> VirtAddr;
/// Kernel offset on boot
fn kernel_offset() -> FixedOffset;
/// Kernel boot image
fn boot_image() -> PhysAddrRange;
/// Kernel code
fn text_image() -> PhysAddrRange;
/// Kernel read-only data
fn static_image() -> PhysAddrRange;
/// Kernel zero-initialised
fn bss_image() -> PhysAddrRange;
/// Kernel dynamic data (includes bss)
fn data_image() -> PhysAddrRange;
/// Kernel reset stack
fn stack_range() -> PhysAddrRange;
/// Initialise virtual memory management.
fn pager_init() -> Result<()>;
/// Enable virtual memory management.
fn enable_paging(page_directory: &impl PageDirectory) -> Result<()>;
/// Move the stack pointer and branch
fn move_stack(stack_pointer: VirtAddr, next: fn() ->!) ->!;
}
/// Methods to maintain a directory of virtual to physical addresses.
pub trait PageDirectory {
/// Enable downshift to arch-specific concrete page directories.
fn as_any(&self) -> &dyn Any;
/// Map physical address range at offset.
fn map_translation(
&mut self,
virt_addr_range: VirtAddrRange,
translation: impl Translate + core::fmt::Debug,
attributes: Attributes,
allocator: &Locked<impl FrameAllocator>,
mem_access_translation: &impl Translate,
) -> Result<VirtAddrRange>;
/// Return the current physical address for a virtual address
fn maps_to(
&self,
virt_addr: VirtAddr,
mem_access_translation: &FixedOffset,
) -> Result<PhysAddr>;
/// Unmap a previously mapped range, and return any memory to the allocator.
fn unmap(
&mut self,
virt_addr_range: VirtAddrRange,
allocator: &'static Locked<impl FrameAllocator>,
mem_access_translation: &FixedOffset,
) -> Result<()>;
/// Log the state of the page directory at debug.
fn dump(&self, mem_access_translation: &impl Translate);
}
/// Construct an empty page directory.
/// TODO: Should this be in Arch trait? limitation of generics in traits right now.
pub fn
|
() -> impl PageDirectory {
super::arch::new_page_directory()
}
|
new_page_directory
|
identifier_name
|
pager.rs
|
// SPDX-License-Identifier: Unlicense
//! Interface for paging functions.
use crate::pager::{
Attributes, FixedOffset, FrameAllocator, PhysAddr, PhysAddrRange, Translate, VirtAddr,
VirtAddrRange,
|
use crate::Result;
use core::any::Any;
/// Each architecture must supply the following entry points for paging..
pub trait PagerTrait {
/// Physical address range of ram
fn ram_range() -> PhysAddrRange;
/// Base virtual address of kernel address space
fn kernel_base() -> VirtAddr;
/// Kernel offset on boot
fn kernel_offset() -> FixedOffset;
/// Kernel boot image
fn boot_image() -> PhysAddrRange;
/// Kernel code
fn text_image() -> PhysAddrRange;
/// Kernel read-only data
fn static_image() -> PhysAddrRange;
/// Kernel zero-initialised
fn bss_image() -> PhysAddrRange;
/// Kernel dynamic data (includes bss)
fn data_image() -> PhysAddrRange;
/// Kernel reset stack
fn stack_range() -> PhysAddrRange;
/// Initialise virtual memory management.
fn pager_init() -> Result<()>;
/// Enable virtual memory management.
fn enable_paging(page_directory: &impl PageDirectory) -> Result<()>;
/// Move the stack pointer and branch
fn move_stack(stack_pointer: VirtAddr, next: fn() ->!) ->!;
}
/// Methods to maintain a directory of virtual to physical addresses.
pub trait PageDirectory {
/// Enable downshift to arch-specific concrete page directories.
fn as_any(&self) -> &dyn Any;
/// Map physical address range at offset.
fn map_translation(
&mut self,
virt_addr_range: VirtAddrRange,
translation: impl Translate + core::fmt::Debug,
attributes: Attributes,
allocator: &Locked<impl FrameAllocator>,
mem_access_translation: &impl Translate,
) -> Result<VirtAddrRange>;
/// Return the current physical address for a virtual address
fn maps_to(
&self,
virt_addr: VirtAddr,
mem_access_translation: &FixedOffset,
) -> Result<PhysAddr>;
/// Unmap a previously mapped range, and return any memory to the allocator.
fn unmap(
&mut self,
virt_addr_range: VirtAddrRange,
allocator: &'static Locked<impl FrameAllocator>,
mem_access_translation: &FixedOffset,
) -> Result<()>;
/// Log the state of the page directory at debug.
fn dump(&self, mem_access_translation: &impl Translate);
}
/// Construct an empty page directory.
/// TODO: Should this be in Arch trait? limitation of generics in traits right now.
pub fn new_page_directory() -> impl PageDirectory {
super::arch::new_page_directory()
}
|
};
use crate::util::locked::Locked;
|
random_line_split
|
pager.rs
|
// SPDX-License-Identifier: Unlicense
//! Interface for paging functions.
use crate::pager::{
Attributes, FixedOffset, FrameAllocator, PhysAddr, PhysAddrRange, Translate, VirtAddr,
VirtAddrRange,
};
use crate::util::locked::Locked;
use crate::Result;
use core::any::Any;
/// Each architecture must supply the following entry points for paging..
pub trait PagerTrait {
/// Physical address range of ram
fn ram_range() -> PhysAddrRange;
/// Base virtual address of kernel address space
fn kernel_base() -> VirtAddr;
/// Kernel offset on boot
fn kernel_offset() -> FixedOffset;
/// Kernel boot image
fn boot_image() -> PhysAddrRange;
/// Kernel code
fn text_image() -> PhysAddrRange;
/// Kernel read-only data
fn static_image() -> PhysAddrRange;
/// Kernel zero-initialised
fn bss_image() -> PhysAddrRange;
/// Kernel dynamic data (includes bss)
fn data_image() -> PhysAddrRange;
/// Kernel reset stack
fn stack_range() -> PhysAddrRange;
/// Initialise virtual memory management.
fn pager_init() -> Result<()>;
/// Enable virtual memory management.
fn enable_paging(page_directory: &impl PageDirectory) -> Result<()>;
/// Move the stack pointer and branch
fn move_stack(stack_pointer: VirtAddr, next: fn() ->!) ->!;
}
/// Methods to maintain a directory of virtual to physical addresses.
pub trait PageDirectory {
/// Enable downshift to arch-specific concrete page directories.
fn as_any(&self) -> &dyn Any;
/// Map physical address range at offset.
fn map_translation(
&mut self,
virt_addr_range: VirtAddrRange,
translation: impl Translate + core::fmt::Debug,
attributes: Attributes,
allocator: &Locked<impl FrameAllocator>,
mem_access_translation: &impl Translate,
) -> Result<VirtAddrRange>;
/// Return the current physical address for a virtual address
fn maps_to(
&self,
virt_addr: VirtAddr,
mem_access_translation: &FixedOffset,
) -> Result<PhysAddr>;
/// Unmap a previously mapped range, and return any memory to the allocator.
fn unmap(
&mut self,
virt_addr_range: VirtAddrRange,
allocator: &'static Locked<impl FrameAllocator>,
mem_access_translation: &FixedOffset,
) -> Result<()>;
/// Log the state of the page directory at debug.
fn dump(&self, mem_access_translation: &impl Translate);
}
/// Construct an empty page directory.
/// TODO: Should this be in Arch trait? limitation of generics in traits right now.
pub fn new_page_directory() -> impl PageDirectory
|
{
super::arch::new_page_directory()
}
|
identifier_body
|
|
variant_ref.rs
|
use vtable::VTable;
use std::any::{Any, TypeId};
use std::fmt::{Debug, Display, Error as FmtError, Formatter};
pub struct VariantRef<'a> {
pub data: &'a (),
pub vtable: &'a VTable,
}
impl<'a> VariantRef<'a> {
pub fn new<T: Any>(value: &'a T, vtable: &'a VTable) -> Self {
VariantRef {
data: unsafe { &*(value as *const _ as *const ()) },
vtable: vtable,
}
}
#[inline]
pub fn is<T: Any>(&self) -> bool {
self.vtable.id == TypeId::of::<T>()
}
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe { Some(&*(self.data as *const _ as *const T)) }
} else {
None
}
}
#[inline]
pub unsafe fn
|
<T: Any>(&self) -> &T {
debug_assert!(self.is::<T>());
&*(self.data as *const _ as *const T)
}
}
impl<'a> Display for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.display)(self, f)
}
}
impl<'a> Debug for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.debug)(self, f)
}
}
|
downcast_ref_unchecked
|
identifier_name
|
variant_ref.rs
|
use vtable::VTable;
use std::any::{Any, TypeId};
use std::fmt::{Debug, Display, Error as FmtError, Formatter};
pub struct VariantRef<'a> {
pub data: &'a (),
pub vtable: &'a VTable,
}
impl<'a> VariantRef<'a> {
pub fn new<T: Any>(value: &'a T, vtable: &'a VTable) -> Self {
VariantRef {
data: unsafe { &*(value as *const _ as *const ()) },
vtable: vtable,
}
}
#[inline]
pub fn is<T: Any>(&self) -> bool {
self.vtable.id == TypeId::of::<T>()
}
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe { Some(&*(self.data as *const _ as *const T)) }
} else {
None
}
}
#[inline]
pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
debug_assert!(self.is::<T>());
&*(self.data as *const _ as *const T)
}
}
impl<'a> Display for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.display)(self, f)
}
}
impl<'a> Debug for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError>
|
}
|
{
(self.vtable.debug)(self, f)
}
|
identifier_body
|
variant_ref.rs
|
use vtable::VTable;
use std::any::{Any, TypeId};
use std::fmt::{Debug, Display, Error as FmtError, Formatter};
pub struct VariantRef<'a> {
pub data: &'a (),
pub vtable: &'a VTable,
}
impl<'a> VariantRef<'a> {
    /// Wraps `value` as a type-erased reference paired with its vtable.
    ///
    /// NOTE(review): `vtable.id` is presumed to be `TypeId::of::<T>()` for
    /// the stored value — `is`/`downcast_ref` rely on that; confirm callers.
    pub fn new<T: Any>(value: &'a T, vtable: &'a VTable) -> Self {
        VariantRef {
            // SAFETY: only erases the type; the reference stays valid for 'a
            // and is re-interpreted only after an `is::<T>()` check (or in
            // the explicitly-unsafe unchecked variant).
            data: unsafe { &*(value as *const _ as *const ()) },
            vtable,
        }
    }
    /// True when the erased value is of type `T`.
    #[inline]
    pub fn is<T: Any>(&self) -> bool {
        self.vtable.id == TypeId::of::<T>()
    }
    /// Checked downcast: `Some(&T)` when the erased value is a `T`.
    #[inline]
    pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
        if self.is::<T>() {
            // SAFETY: the TypeId check above guarantees the erased reference
            // really refers to a `T`.
            unsafe { Some(&*(self.data as *const _ as *const T)) }
        } else {
            None
        }
    }
    /// Unchecked downcast.
    ///
    /// # Safety
    /// The caller must guarantee the erased value is a `T` (i.e.
    /// `self.is::<T>()` holds); only debug builds verify it.
    #[inline]
    pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
        debug_assert!(self.is::<T>());
        &*(self.data as *const _ as *const T)
    }
}
impl<'a> Display for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.display)(self, f)
}
}
impl<'a> Debug for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.debug)(self, f)
}
}
|
{
unsafe { Some(&*(self.data as *const _ as *const T)) }
}
|
conditional_block
|
variant_ref.rs
|
use vtable::VTable;
use std::any::{Any, TypeId};
use std::fmt::{Debug, Display, Error as FmtError, Formatter};
pub struct VariantRef<'a> {
pub data: &'a (),
pub vtable: &'a VTable,
}
impl<'a> VariantRef<'a> {
pub fn new<T: Any>(value: &'a T, vtable: &'a VTable) -> Self {
VariantRef {
data: unsafe { &*(value as *const _ as *const ()) },
vtable: vtable,
}
}
#[inline]
pub fn is<T: Any>(&self) -> bool {
self.vtable.id == TypeId::of::<T>()
}
#[inline]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
if self.is::<T>() {
unsafe { Some(&*(self.data as *const _ as *const T)) }
} else {
None
}
}
#[inline]
|
&*(self.data as *const _ as *const T)
}
}
impl<'a> Display for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.display)(self, f)
}
}
impl<'a> Debug for VariantRef<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
(self.vtable.debug)(self, f)
}
}
|
pub unsafe fn downcast_ref_unchecked<T: Any>(&self) -> &T {
debug_assert!(self.is::<T>());
|
random_line_split
|
save.rs
|
use std::io;
use std::io::prelude::*;
use std::path::Path;
use std::fs::{self, File};
use gb_emu::cart::SaveFile;
pub struct LocalSaveWrapper<'a> {
pub path: &'a Path,
}
impl<'a> SaveFile for LocalSaveWrapper<'a> {
fn load(&mut self, data: &mut [u8]) {
if let Ok(_) = File::open(&self.path).map(|mut f| f.read(data)) {
println!("Loaded {}", self.path.display());
}
}
fn
|
(&mut self, data: &[u8]) {
// First create a temporary file and write to that, to ensure that if an error occurs, the
// old file is not lost.
let tmp_path = self.path.with_extension("sav.tmp");
if let Err(e) = File::create(&tmp_path).map(|mut f| f.write_all(data)) {
println!("An error occured when writing the save file: {}", e);
return;
}
// At this stage the new save file has been successfully written, so we can safely remove
// the old file if it exists.
match fs::remove_file(&self.path) {
Ok(_) => {},
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {},
Err(e) => {
println!("Error removing old save file ({}), current save has been written to: {}",
e, tmp_path.display());
return;
},
}
// Now rename the temporary file to the correct name
if let Err(e) = fs::rename(&tmp_path, &self.path) {
println!("Error renaming temporary save file: {}", e);
}
}
}
|
save
|
identifier_name
|
save.rs
|
use std::io;
use std::io::prelude::*;
use std::path::Path;
use std::fs::{self, File};
use gb_emu::cart::SaveFile;
pub struct LocalSaveWrapper<'a> {
pub path: &'a Path,
}
impl<'a> SaveFile for LocalSaveWrapper<'a> {
fn load(&mut self, data: &mut [u8]) {
if let Ok(_) = File::open(&self.path).map(|mut f| f.read(data)) {
println!("Loaded {}", self.path.display());
}
}
fn save(&mut self, data: &[u8]) {
// First create a temporary file and write to that, to ensure that if an error occurs, the
// old file is not lost.
let tmp_path = self.path.with_extension("sav.tmp");
if let Err(e) = File::create(&tmp_path).map(|mut f| f.write_all(data)) {
println!("An error occured when writing the save file: {}", e);
return;
}
|
match fs::remove_file(&self.path) {
Ok(_) => {},
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {},
Err(e) => {
println!("Error removing old save file ({}), current save has been written to: {}",
e, tmp_path.display());
return;
},
}
// Now rename the temporary file to the correct name
if let Err(e) = fs::rename(&tmp_path, &self.path) {
println!("Error renaming temporary save file: {}", e);
}
}
}
|
// At this stage the new save file has been successfully written, so we can safely remove
// the old file if it exists.
|
random_line_split
|
simple_test.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
use rustorm::table::{IsTable, Table};
#[derive(Debug, Clone)]
pub struct Product {
pub product_id: Uuid,
pub name: Option<String>,
pub description: Option<String>,
}
impl IsDao for Product{
fn from_dao(dao: &Dao) -> Self {
Product {
product_id: dao.get("product_id"),
name: dao.get_opt("name"),
description: dao.get_opt("description"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("product_id", &self.product_id);
match self.name {
Some(ref _value) => dao.set("name", _value),
None => dao.set_null("name"),
}
match self.description {
Some(ref _value) => dao.set("description", _value),
None => dao.set_null("description"),
}
|
fn table() -> Table {
Table {
schema: "bazaar".to_string(),
name: "product".to_string(),
parent_table: None,
sub_table: vec![],
comment: None,
columns: vec![],
is_view: false,
}
}
}
#[test]
fn test_simple_query() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let prod: Product = Query::select_all()
.from_table("bazaar.product")
.filter("name", Equality::EQ, &"GTX660 Ti videocard")
.collect_one(db.as_ref())
.unwrap();
println!("{} {} {:?}",
prod.product_id,
prod.name.unwrap(),
prod.description);
}
|
dao
}
}
impl IsTable for Product{
|
random_line_split
|
simple_test.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
use rustorm::table::{IsTable, Table};
#[derive(Debug, Clone)]
pub struct Product {
pub product_id: Uuid,
pub name: Option<String>,
pub description: Option<String>,
}
impl IsDao for Product{
fn from_dao(dao: &Dao) -> Self {
Product {
product_id: dao.get("product_id"),
name: dao.get_opt("name"),
description: dao.get_opt("description"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("product_id", &self.product_id);
match self.name {
Some(ref _value) => dao.set("name", _value),
None => dao.set_null("name"),
}
match self.description {
Some(ref _value) => dao.set("description", _value),
None => dao.set_null("description"),
}
dao
}
}
impl IsTable for Product{
fn table() -> Table {
Table {
schema: "bazaar".to_string(),
name: "product".to_string(),
parent_table: None,
sub_table: vec![],
comment: None,
columns: vec![],
is_view: false,
}
}
}
#[test]
fn test_simple_query() {
    // NOTE(review): integration test — requires a live Postgres at localhost
    // with the bazaar_v6 database and a seeded `bazaar.product` row.
    let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
    let pool = ManagedPool::init(&url, 1).unwrap();
    let db = pool.connect().unwrap();
    // Fetch a single product by exact name match.
    let prod: Product = Query::select_all()
        .from_table("bazaar.product")
        .filter("name", Equality::EQ, &"GTX660 Ti videocard")
        .collect_one(db.as_ref())
        .unwrap();
    println!("{} {} {:?}",
             prod.product_id,
             prod.name.unwrap(),
             prod.description);
}
|
identifier_body
|
|
simple_test.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
use rustorm::table::{IsTable, Table};
#[derive(Debug, Clone)]
pub struct Product {
pub product_id: Uuid,
pub name: Option<String>,
pub description: Option<String>,
}
impl IsDao for Product{
fn from_dao(dao: &Dao) -> Self {
Product {
product_id: dao.get("product_id"),
name: dao.get_opt("name"),
description: dao.get_opt("description"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("product_id", &self.product_id);
match self.name {
Some(ref _value) => dao.set("name", _value),
None => dao.set_null("name"),
}
match self.description {
Some(ref _value) => dao.set("description", _value),
None => dao.set_null("description"),
}
dao
}
}
impl IsTable for Product{
fn
|
() -> Table {
Table {
schema: "bazaar".to_string(),
name: "product".to_string(),
parent_table: None,
sub_table: vec![],
comment: None,
columns: vec![],
is_view: false,
}
}
}
#[test]
fn test_simple_query() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let prod: Product = Query::select_all()
.from_table("bazaar.product")
.filter("name", Equality::EQ, &"GTX660 Ti videocard")
.collect_one(db.as_ref())
.unwrap();
println!("{} {} {:?}",
prod.product_id,
prod.name.unwrap(),
prod.description);
}
|
table
|
identifier_name
|
merkle_tree.rs
|
use std;
extern crate bit_vec;
use super::{UInt256};
use ::serialize::{self, Serializable};
/// Reverses the bit order of a byte (MSB becomes LSB and vice versa).
fn reverse_u8(x:u8) -> u8 {
    // Shift bits out of the input LSB-first and into the output MSB-first;
    // after eight steps the bit order is fully reversed.
    let mut remaining = x;
    let mut reversed = 0u8;
    for _ in 0..8 {
        reversed = (reversed << 1) | (remaining & 1);
        remaining >>= 1;
    }
    reversed
}
#[derive(Debug,Default,Clone)]
pub struct PartialMerkleTree {
pub n_transactions: u32,
pub bits: bit_vec::BitVec,
pub hashes: Vec<UInt256>,
}
|
impl std::fmt::Display for PartialMerkleTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "PartialMerkleTree(n={}, bits={:?}, hash={:?})", self.n_transactions, self.bits, self.hashes)
}
}
impl Serializable for PartialMerkleTree {
    // Size = 4-byte transaction count + ceil(bits/8) packed flag bytes
    // + 32 bytes per hash.
    fn get_serialize_size(&self, _ser:&serialize::SerializeParam) -> usize {
        4 + (self.bits.len()+7)/8 + 32*self.hashes.len()
    }
    // Writes n_transactions, then the flag bits packed into bytes, then the
    // hashes; returns the number of bytes written. Each packed byte is
    // bit-reversed before writing — presumably because BitVec::to_bytes packs
    // MSB-first while the wire format expects LSB-first; TODO confirm against
    // the protocol spec.
    fn serialize(&self, io:&mut std::io::Write, ser:&serialize::SerializeParam) -> serialize::Result {
        let mut r:usize = 0;
        r += try!(self.n_transactions.serialize(io, ser));
        {
            let mut bytes = self.bits.to_bytes();
            for byte in &mut bytes {
                *byte = reverse_u8(*byte);
            }
            r += try!(bytes.serialize(io, ser));
        }
        r += try!(self.hashes.serialize(io, ser));
        Ok(r)
    }
    // Mirror image of `serialize`: reads the count, un-reverses each flag
    // byte, rebuilds the BitVec, then reads the hashes. Returns the total
    // number of bytes consumed.
    fn deserialize(&mut self, io:&mut std::io::Read, ser:&serialize::SerializeParam) -> serialize::Result {
        let mut r:usize = 0;
        r += try!(self.n_transactions.deserialize(io, ser));
        {
            let mut bytes:Vec<u8> = Vec::new();
            r += try!(bytes.deserialize(io, ser));
            for byte in &mut bytes {
                *byte = reverse_u8(*byte);
            }
            self.bits = bit_vec::BitVec::from_bytes(bytes.as_slice());
        }
        r += try!(self.hashes.deserialize(io, ser));
        Ok(r)
    }
}
|
random_line_split
|
|
merkle_tree.rs
|
use std;
extern crate bit_vec;
use super::{UInt256};
use ::serialize::{self, Serializable};
fn reverse_u8(x:u8) -> u8 {
let x:u8 = ((x & 0x55) << 1) | ((x & 0xAA) >> 1);
let x:u8 = ((x & 0x33) << 2) | ((x & 0xCC) >> 2);
let x:u8 = (x << 4) | (x >> 4);
x
}
#[derive(Debug,Default,Clone)]
pub struct PartialMerkleTree {
pub n_transactions: u32,
pub bits: bit_vec::BitVec,
pub hashes: Vec<UInt256>,
}
impl std::fmt::Display for PartialMerkleTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "PartialMerkleTree(n={}, bits={:?}, hash={:?})", self.n_transactions, self.bits, self.hashes)
}
}
impl Serializable for PartialMerkleTree {
fn get_serialize_size(&self, _ser:&serialize::SerializeParam) -> usize {
4 + (self.bits.len()+7)/8 + 32*self.hashes.len()
}
fn
|
(&self, io:&mut std::io::Write, ser:&serialize::SerializeParam) -> serialize::Result {
let mut r:usize = 0;
r += try!(self.n_transactions.serialize(io, ser));
{
let mut bytes = self.bits.to_bytes();
for byte in &mut bytes {
*byte = reverse_u8(*byte);
}
r += try!(bytes.serialize(io, ser));
}
r += try!(self.hashes.serialize(io, ser));
Ok(r)
}
fn deserialize(&mut self, io:&mut std::io::Read, ser:&serialize::SerializeParam) -> serialize::Result {
let mut r:usize = 0;
r += try!(self.n_transactions.deserialize(io, ser));
{
let mut bytes:Vec<u8> = Vec::new();
r += try!(bytes.deserialize(io, ser));
for byte in &mut bytes {
*byte = reverse_u8(*byte);
}
self.bits = bit_vec::BitVec::from_bytes(bytes.as_slice());
}
r += try!(self.hashes.deserialize(io, ser));
Ok(r)
}
}
|
serialize
|
identifier_name
|
hofstadter_q.rs
|
// Implements an iterable version of http://rosettacode.org/wiki/Hofstadter_Q_sequence
// Define a struct which stores the state for the iterator.
struct HofstadterQ {
next: usize,
memoize_vec: Vec<usize>
}
impl HofstadterQ {
// Define a constructor for the struct.
fn new() -> HofstadterQ {
HofstadterQ { next: 1, memoize_vec: vec![1] }
}
}
// Implement the hofstadter q iteration sequence.
// Yields Q(3), Q(4), ... on successive calls; Q(1) = Q(2) = 1 are the base
// cases (the seed vec![1] plus the initial `next` of 1 set in `new`).
impl Iterator for HofstadterQ {
    type Item = usize;
    // This gets called to fetch the next item of the iterator.
    fn next(&mut self) -> Option<usize> {
        // Cache the current value.
        self.memoize_vec.push(self.next);
        // And then calculate the 'next'.
        // First, make the four recursive calls.
        // Q(n) = Q(n - Q(n-1)) + Q(n - Q(n-2)), looked up in the memo table
        // instead of recursing. With memoize_vec[i] = Q(i+1), `current`
        // equals n - 1 after the push above.
        let current: usize = self.memoize_vec.len();
        let rec_call_1: usize = self.memoize_vec[current - 1];
        let rec_call_2: usize = self.memoize_vec[current - 2];
        let rec_call_3: usize = self.memoize_vec[current - rec_call_1];
        let rec_call_4: usize = self.memoize_vec[current - rec_call_2];
        // Then update self.next and return it.
        // Never returns None: the sequence is infinite, so callers bound it
        // with `take`.
        self.next = rec_call_3 + rec_call_4;
        Some(self.next)
    }
}
#[cfg(not(test))]
fn main() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
// Print the base values.
println!("H(1) = 1");
println!("H(2) = 1");
// Print the rest of the sequence.
for i in (3..1+upto) {
println!("H({}) = {}", i, it.next().unwrap());
}
}
#[test]
fn test_first_ten() {
    // Set up the iterable.
    let hof: HofstadterQ = HofstadterQ::new();
    // Create the iterator.
    let mut it = hof.take(10);
    // Test that the first ten values are as expected.
    // The first two values (Q(1) = Q(2) = 1) are hardcoded, so the iterator
    // starts at Q(3); compare the following eight terms.
    let hofstadter_q_expected = vec![2, 3, 3, 4, 5, 5, 6, 6];
    for expected in hofstadter_q_expected {
        assert_eq!(expected, it.next().unwrap());
    }
}
#[test]
fn test_thousandth() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
let expected: usize = 502;
// Test that the upto-th term is as expected.
for _ in (3..upto) {
it.next();
}
assert_eq!(expected, it.next().unwrap());
}
|
test_first_ten
|
identifier_name
|
hofstadter_q.rs
|
// Implements an iterable version of http://rosettacode.org/wiki/Hofstadter_Q_sequence
// Define a struct which stores the state for the iterator.
struct HofstadterQ {
next: usize,
memoize_vec: Vec<usize>
}
impl HofstadterQ {
// Define a constructor for the struct.
fn new() -> HofstadterQ {
HofstadterQ { next: 1, memoize_vec: vec![1] }
}
}
// Implement the hofstadter q iteration sequence.
impl Iterator for HofstadterQ {
type Item = usize;
// This gets called to fetch the next item of the iterator.
fn next(&mut self) -> Option<usize>
|
}
#[cfg(not(test))]
fn main() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
// Print the base values.
println!("H(1) = 1");
println!("H(2) = 1");
// Print the rest of the sequence.
for i in (3..1+upto) {
println!("H({}) = {}", i, it.next().unwrap());
}
}
#[test]
fn test_first_ten() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// Create the iterator.
let mut it = hof.take(10);
// Test that the first ten values are as expected
// The first two values are hardcoded, so no need to test those.
let hofstadter_q_expected = vec![2,3,3,4,5,5,6,6];
for i in (0..8) {
assert_eq!(hofstadter_q_expected[i], it.next().unwrap());
}
}
#[test]
fn test_thousandth() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
let expected: usize = 502;
// Test that the upto-th term is as expected.
for _ in (3..upto) {
it.next();
}
assert_eq!(expected, it.next().unwrap());
}
|
{
// Cache the current value.
self.memoize_vec.push(self.next);
// And then calculate the 'next'.
// First, make the four recursive calls.
let current: usize = self.memoize_vec.len();
let rec_call_1: usize = self.memoize_vec[current - 1];
let rec_call_2: usize = self.memoize_vec[current - 2];
let rec_call_3: usize = self.memoize_vec[current - rec_call_1];
let rec_call_4: usize = self.memoize_vec[current - rec_call_2];
// Then update self.next and return it.
self.next = rec_call_3 + rec_call_4;
Some(self.next)
}
|
identifier_body
|
hofstadter_q.rs
|
// Implements an iterable version of http://rosettacode.org/wiki/Hofstadter_Q_sequence
// Define a struct which stores the state for the iterator.
struct HofstadterQ {
next: usize,
memoize_vec: Vec<usize>
}
impl HofstadterQ {
// Define a constructor for the struct.
fn new() -> HofstadterQ {
HofstadterQ { next: 1, memoize_vec: vec![1] }
}
}
// Implement the hofstadter q iteration sequence.
impl Iterator for HofstadterQ {
type Item = usize;
// This gets called to fetch the next item of the iterator.
fn next(&mut self) -> Option<usize> {
// Cache the current value.
self.memoize_vec.push(self.next);
// And then calculate the 'next'.
// First, make the four recursive calls.
let current: usize = self.memoize_vec.len();
let rec_call_1: usize = self.memoize_vec[current - 1];
let rec_call_2: usize = self.memoize_vec[current - 2];
let rec_call_3: usize = self.memoize_vec[current - rec_call_1];
let rec_call_4: usize = self.memoize_vec[current - rec_call_2];
// Then update self.next and return it.
self.next = rec_call_3 + rec_call_4;
Some(self.next)
}
}
#[cfg(not(test))]
fn main() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
|
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
// Print the base values.
println!("H(1) = 1");
println!("H(2) = 1");
// Print the rest of the sequence.
for i in (3..1+upto) {
println!("H({}) = {}", i, it.next().unwrap());
}
}
#[test]
fn test_first_ten() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// Create the iterator.
let mut it = hof.take(10);
// Test that the first ten values are as expected
// The first two values are hardcoded, so no need to test those.
let hofstadter_q_expected = vec![2,3,3,4,5,5,6,6];
for i in (0..8) {
assert_eq!(hofstadter_q_expected[i], it.next().unwrap());
}
}
#[test]
fn test_thousandth() {
// Set up the iterable.
let hof: HofstadterQ = HofstadterQ::new();
// The number of terms we want from the iterator.
let upto: usize = 1000;
// Create the iterator.
let mut it = hof.take(upto - 2);
let expected: usize = 502;
// Test that the upto-th term is as expected.
for _ in (3..upto) {
it.next();
}
assert_eq!(expected, it.next().unwrap());
}
|
random_line_split
|
|
ike.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// Author: Frank Honza <[email protected]>
extern crate ipsec_parser;
use self::ipsec_parser::*;
use crate::applayer;
use crate::applayer::*;
use crate::core::{
self, AppProto, Flow, ALPROTO_FAILED, ALPROTO_UNKNOWN, STREAM_TOCLIENT, STREAM_TOSERVER,
};
use crate::ike::ikev1::{handle_ikev1, IkeV1Header, Ikev1Container};
use crate::ike::ikev2::{handle_ikev2, Ikev2Container};
use crate::ike::parser::*;
use nom;
use std;
use std::collections::HashSet;
use std::ffi::CString;
#[derive(AppLayerEvent)]
pub enum IkeEvent {
MalformedData,
NoEncryption,
WeakCryptoEnc,
WeakCryptoPrf,
WeakCryptoDh,
WeakCryptoAuth,
WeakCryptoNoDh,
WeakCryptoNoAuth,
InvalidProposal,
UnknownProposal,
PayloadExtraData,
MultipleServerProposal,
}
pub struct IkeHeaderWrapper {
pub spi_initiator: String,
pub spi_responder: String,
pub maj_ver: u8,
pub min_ver: u8,
pub msg_id: u32,
pub flags: u8,
pub ikev1_transforms: Vec<Vec<SaAttribute>>,
pub ikev2_transforms: Vec<Vec<IkeV2Transform>>,
pub ikev1_header: IkeV1Header,
pub ikev2_header: IkeV2Header,
}
impl IkeHeaderWrapper {
pub fn new() -> IkeHeaderWrapper {
IkeHeaderWrapper {
spi_initiator: String::new(),
spi_responder: String::new(),
maj_ver: 0,
min_ver: 0,
msg_id: 0,
flags: 0,
ikev1_transforms: Vec::new(),
ikev2_transforms: Vec::new(),
ikev1_header: IkeV1Header::default(),
ikev2_header: IkeV2Header {
init_spi: 0,
resp_spi: 0,
next_payload: IkePayloadType::NoNextPayload,
maj_ver: 0,
min_ver: 0,
exch_type: IkeExchangeType(0),
flags: 0,
msg_id: 0,
length: 0,
},
}
}
}
#[derive(Default)]
pub struct IkePayloadWrapper {
pub ikev1_payload_types: Option<HashSet<u8>>,
pub ikev2_payload_types: Vec<IkePayloadType>,
}
pub struct IKETransaction {
tx_id: u64,
pub ike_version: u8,
pub hdr: IkeHeaderWrapper,
pub payload_types: IkePayloadWrapper,
pub notify_types: Vec<NotifyType>,
/// errors seen during exchange
pub errors: u32,
logged: LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
tx_data: applayer::AppLayerTxData,
}
impl IKETransaction {
pub fn new() -> IKETransaction {
IKETransaction {
tx_id: 0,
ike_version: 0,
hdr: IkeHeaderWrapper::new(),
payload_types: Default::default(),
notify_types: vec![],
logged: LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
tx_data: applayer::AppLayerTxData::new(),
errors: 0,
}
}
pub fn free(&mut self) {
if self.events!= std::ptr::null_mut() {
core::sc_app_layer_decoder_events_free_events(&mut self.events);
}
if let Some(state) = self.de_state {
core::sc_detect_engine_state_free(state);
}
}
}
impl Drop for IKETransaction {
fn drop(&mut self) {
self.free();
}
}
#[derive(Default)]
pub struct IKEState {
tx_id: u64,
pub transactions: Vec<IKETransaction>,
pub ikev1_container: Ikev1Container,
pub ikev2_container: Ikev2Container,
}
impl IKEState {
// Free a transaction by ID.
fn free_tx(&mut self, tx_id: u64) {
let tx = self
.transactions
.iter()
.position(|tx| tx.tx_id == tx_id + 1);
debug_assert!(tx!= None);
if let Some(idx) = tx {
let _ = self.transactions.remove(idx);
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&mut IKETransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
pub fn new_tx(&mut self) -> IKETransaction {
let mut tx = IKETransaction::new();
self.tx_id += 1;
tx.tx_id = self.tx_id;
return tx;
}
/// Set an event. The event is set on the most recent transaction.
pub fn set_event(&mut self, event: IkeEvent) {
if let Some(tx) = self.transactions.last_mut() {
let ev = event as u8;
core::sc_app_layer_decoder_events_set_event_raw(&mut tx.events, ev);
} else {
SCLogDebug!(
"IKE: trying to set event {} on non-existing transaction",
event as u32
);
}
}
fn handle_input(&mut self, input: &[u8], direction: u8) -> AppLayerResult {
// We're not interested in empty requests.
if input.len() == 0 {
return AppLayerResult::ok();
}
let mut current = input;
match parse_isakmp_header(current) {
Ok((rem, isakmp_header)) => {
current = rem;
if isakmp_header.maj_ver!= 1 && isakmp_header.maj_ver!= 2 {
SCLogDebug!("Unsupported ISAKMP major_version");
return AppLayerResult::err();
}
if isakmp_header.maj_ver == 1 {
handle_ikev1(self, current, isakmp_header, direction);
} else if isakmp_header.maj_ver == 2 {
handle_ikev2(self, current, isakmp_header, direction);
} else {
return AppLayerResult::err();
}
return AppLayerResult::ok(); // todo either remove outer loop or check header length-field if we have completely read everything
}
Err(nom::Err::Incomplete(_)) => {
SCLogDebug!("Insufficient data while parsing IKE");
return AppLayerResult::err();
}
Err(_) => {
SCLogDebug!("Error while parsing IKE packet");
return AppLayerResult::err();
}
}
}
fn tx_iterator(
&mut self, min_tx_id: u64, state: &mut u64,
) -> Option<(&IKETransaction, u64, bool)> {
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
/// Probe to see if this input looks like a request or response.
fn probe(input: &[u8], direction: u8, rdir: *mut u8) -> bool {
match parse_isakmp_header(input) {
Ok((_, isakmp_header)) => {
if isakmp_header.maj_ver == 1 {
if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
unsafe {
*rdir = STREAM_TOSERVER;
}
}
return true;
} else if isakmp_header.maj_ver == 2 {
if isakmp_header.min_ver!= 0 {
SCLogDebug!(
"ipsec_probe: could be ipsec, but with unsupported/invalid version {}.{}",
isakmp_header.maj_ver,
isakmp_header.min_ver
);
return false;
}
if isakmp_header.exch_type < 34 || isakmp_header.exch_type > 37 {
SCLogDebug!("ipsec_probe: could be ipsec, but with unsupported/invalid exchange type {}",
isakmp_header.exch_type);
return false;
}
if isakmp_header.length as usize!= input.len() {
SCLogDebug!("ipsec_probe: could be ipsec, but length does not match");
return false;
}
if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
unsafe {
*rdir = STREAM_TOSERVER;
}
}
return true;
}
return false;
}
Err(_) => return false,
}
}
// C exports.
export_tx_get_detect_state!(rs_ike_tx_get_detect_state, IKETransaction);
export_tx_set_detect_state!(rs_ike_tx_set_detect_state, IKETransaction);
/// C entry point for a probing parser.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_probing_parser(
_flow: *const Flow, direction: u8, input: *const u8, input_len: u32, rdir: *mut u8,
) -> AppProto {
if input_len < 28 {
// at least the ISAKMP_HEADER must be there, not ALPROTO_UNKNOWN because over UDP
return ALPROTO_FAILED;
}
if input!= std::ptr::null_mut() {
let slice = build_slice!(input, input_len as usize);
if probe(slice, direction, rdir) {
return ALPROTO_IKE ;
}
}
return ALPROTO_FAILED;
}
#[no_mangle]
pub extern "C" fn rs_ike_state_new(
_orig_state: *mut std::os::raw::c_void, _orig_proto: AppProto,
) -> *mut std::os::raw::c_void {
let state = IKEState::default();
let boxed = Box::new(state);
return Box::into_raw(boxed) as *mut _;
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_free(state: *mut std::os::raw::c_void) {
// Just unbox...
std::mem::drop(Box::from_raw(state as *mut IKEState));
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_tx_free(state: *mut std::os::raw::c_void, tx_id: u64) {
let state = cast_pointer!(state, IKEState);
state.free_tx(tx_id);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_request(
_flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
let state = cast_pointer!(state, IKEState);
let buf = build_slice!(input, input_len as usize);
return state.handle_input(buf, STREAM_TOSERVER);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_response(
_flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
let state = cast_pointer!(state, IKEState);
let buf = build_slice!(input, input_len as usize);
return state.handle_input(buf, STREAM_TOCLIENT);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx(
state: *mut std::os::raw::c_void, tx_id: u64,
) -> *mut std::os::raw::c_void {
let state = cast_pointer!(state, IKEState);
match state.get_tx(tx_id) {
Some(tx) => {
return tx as *const _ as *mut _;
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_count(state: *mut std::os::raw::c_void) -> u64 {
let state = cast_pointer!(state, IKEState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_ike_state_progress_completion_status(_direction: u8) -> std::os::raw::c_int {
// This parser uses 1 to signal transaction completion status.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_ike_tx_get_alstate_progress(
_tx: *mut std::os::raw::c_void, _direction: u8,
) -> std::os::raw::c_int {
return 1;
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_get_logged(
_state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void,
) -> u32 {
let tx = cast_pointer!(tx, IKETransaction);
return tx.logged.get();
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_set_logged(
_state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void, logged: u32,
) {
let tx = cast_pointer!(tx, IKETransaction);
tx.logged.set(logged);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_events(
tx: *mut std::os::raw::c_void,
) -> *mut core::AppLayerDecoderEvents
|
static mut ALPROTO_IKE : AppProto = ALPROTO_UNKNOWN;
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_iterator(
_ipproto: u8, _alproto: AppProto, state: *mut std::os::raw::c_void, min_tx_id: u64,
_max_tx_id: u64, istate: &mut u64,
) -> applayer::AppLayerGetTxIterTuple {
let state = cast_pointer!(state, IKEState);
match state.tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = tx as *const _ as *mut _;
let ires = applayer::AppLayerGetTxIterTuple::with_values(c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
// Parser name as a C style string.
const PARSER_NAME: &'static [u8] = b"ike\0";
const PARSER_ALIAS: &'static [u8] = b"ikev2\0";
export_tx_data_get!(rs_ike_get_tx_data, IKETransaction);
#[no_mangle]
pub unsafe extern "C" fn rs_ike_register_parser() {
let default_port = CString::new("500").unwrap();
let parser = RustParser {
name : PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
default_port : default_port.as_ptr(),
ipproto : core::IPPROTO_UDP,
probe_ts : Some(rs_ike_probing_parser),
probe_tc : Some(rs_ike_probing_parser),
min_depth : 0,
max_depth : 16,
state_new : rs_ike_state_new,
state_free : rs_ike_state_free,
tx_free : rs_ike_state_tx_free,
parse_ts : rs_ike_parse_request,
parse_tc : rs_ike_parse_response,
get_tx_count : rs_ike_state_get_tx_count,
get_tx : rs_ike_state_get_tx,
tx_comp_st_ts : 1,
tx_comp_st_tc : 1,
tx_get_progress : rs_ike_tx_get_alstate_progress,
get_de_state : rs_ike_tx_get_detect_state,
set_de_state : rs_ike_tx_set_detect_state,
get_events : Some(rs_ike_state_get_events),
get_eventinfo : Some(IkeEvent::get_event_info),
get_eventinfo_byid : Some(IkeEvent::get_event_info_by_id),
localstorage_new : None,
localstorage_free : None,
get_files : None,
get_tx_iterator : None,
get_tx_data : rs_ike_get_tx_data,
apply_tx_config : None,
flags : APP_LAYER_PARSER_OPT_UNIDIR_TXS,
truncate : None,
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_IKE = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
AppLayerRegisterParserAlias(
PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
PARSER_ALIAS.as_ptr() as *const std::os::raw::c_char,
);
SCLogDebug!("Rust IKE parser registered.");
} else {
SCLogDebug!("Protocol detector and parser disabled for IKE.");
}
}
|
{
let tx = cast_pointer!(tx, IKETransaction);
return tx.events;
}
|
identifier_body
|
ike.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// Author: Frank Honza <[email protected]>
extern crate ipsec_parser;
use self::ipsec_parser::*;
use crate::applayer;
use crate::applayer::*;
use crate::core::{
self, AppProto, Flow, ALPROTO_FAILED, ALPROTO_UNKNOWN, STREAM_TOCLIENT, STREAM_TOSERVER,
};
use crate::ike::ikev1::{handle_ikev1, IkeV1Header, Ikev1Container};
use crate::ike::ikev2::{handle_ikev2, Ikev2Container};
use crate::ike::parser::*;
use nom;
use std;
use std::collections::HashSet;
use std::ffi::CString;
#[derive(AppLayerEvent)]
pub enum IkeEvent {
MalformedData,
NoEncryption,
WeakCryptoEnc,
WeakCryptoPrf,
WeakCryptoDh,
WeakCryptoAuth,
WeakCryptoNoDh,
WeakCryptoNoAuth,
InvalidProposal,
UnknownProposal,
PayloadExtraData,
MultipleServerProposal,
}
pub struct IkeHeaderWrapper {
pub spi_initiator: String,
pub spi_responder: String,
pub maj_ver: u8,
pub min_ver: u8,
pub msg_id: u32,
pub flags: u8,
pub ikev1_transforms: Vec<Vec<SaAttribute>>,
pub ikev2_transforms: Vec<Vec<IkeV2Transform>>,
pub ikev1_header: IkeV1Header,
pub ikev2_header: IkeV2Header,
}
impl IkeHeaderWrapper {
pub fn new() -> IkeHeaderWrapper {
IkeHeaderWrapper {
spi_initiator: String::new(),
spi_responder: String::new(),
maj_ver: 0,
min_ver: 0,
msg_id: 0,
flags: 0,
ikev1_transforms: Vec::new(),
ikev2_transforms: Vec::new(),
ikev1_header: IkeV1Header::default(),
ikev2_header: IkeV2Header {
init_spi: 0,
resp_spi: 0,
next_payload: IkePayloadType::NoNextPayload,
maj_ver: 0,
min_ver: 0,
exch_type: IkeExchangeType(0),
flags: 0,
msg_id: 0,
length: 0,
},
}
}
}
#[derive(Default)]
pub struct IkePayloadWrapper {
pub ikev1_payload_types: Option<HashSet<u8>>,
pub ikev2_payload_types: Vec<IkePayloadType>,
}
pub struct IKETransaction {
tx_id: u64,
pub ike_version: u8,
pub hdr: IkeHeaderWrapper,
pub payload_types: IkePayloadWrapper,
pub notify_types: Vec<NotifyType>,
/// errors seen during exchange
pub errors: u32,
logged: LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
tx_data: applayer::AppLayerTxData,
}
impl IKETransaction {
pub fn new() -> IKETransaction {
IKETransaction {
tx_id: 0,
ike_version: 0,
hdr: IkeHeaderWrapper::new(),
payload_types: Default::default(),
notify_types: vec![],
logged: LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
tx_data: applayer::AppLayerTxData::new(),
errors: 0,
}
}
pub fn free(&mut self) {
if self.events!= std::ptr::null_mut() {
core::sc_app_layer_decoder_events_free_events(&mut self.events);
}
if let Some(state) = self.de_state {
core::sc_detect_engine_state_free(state);
}
}
}
impl Drop for IKETransaction {
fn drop(&mut self) {
self.free();
}
}
#[derive(Default)]
pub struct IKEState {
tx_id: u64,
pub transactions: Vec<IKETransaction>,
pub ikev1_container: Ikev1Container,
pub ikev2_container: Ikev2Container,
}
impl IKEState {
// Free a transaction by ID.
fn free_tx(&mut self, tx_id: u64) {
let tx = self
.transactions
.iter()
.position(|tx| tx.tx_id == tx_id + 1);
debug_assert!(tx!= None);
if let Some(idx) = tx {
let _ = self.transactions.remove(idx);
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&mut IKETransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
pub fn new_tx(&mut self) -> IKETransaction {
let mut tx = IKETransaction::new();
self.tx_id += 1;
tx.tx_id = self.tx_id;
return tx;
}
/// Set an event. The event is set on the most recent transaction.
pub fn set_event(&mut self, event: IkeEvent) {
if let Some(tx) = self.transactions.last_mut() {
let ev = event as u8;
core::sc_app_layer_decoder_events_set_event_raw(&mut tx.events, ev);
} else {
SCLogDebug!(
"IKE: trying to set event {} on non-existing transaction",
event as u32
);
}
}
fn handle_input(&mut self, input: &[u8], direction: u8) -> AppLayerResult {
// We're not interested in empty requests.
if input.len() == 0 {
return AppLayerResult::ok();
}
let mut current = input;
match parse_isakmp_header(current) {
Ok((rem, isakmp_header)) => {
current = rem;
if isakmp_header.maj_ver!= 1 && isakmp_header.maj_ver!= 2 {
SCLogDebug!("Unsupported ISAKMP major_version");
return AppLayerResult::err();
}
if isakmp_header.maj_ver == 1 {
handle_ikev1(self, current, isakmp_header, direction);
} else if isakmp_header.maj_ver == 2 {
handle_ikev2(self, current, isakmp_header, direction);
} else {
return AppLayerResult::err();
}
return AppLayerResult::ok(); // todo either remove outer loop or check header length-field if we have completely read everything
}
Err(nom::Err::Incomplete(_)) => {
SCLogDebug!("Insufficient data while parsing IKE");
return AppLayerResult::err();
}
Err(_) => {
SCLogDebug!("Error while parsing IKE packet");
return AppLayerResult::err();
}
}
}
fn tx_iterator(
&mut self, min_tx_id: u64, state: &mut u64,
) -> Option<(&IKETransaction, u64, bool)> {
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
/// Probe to see if this input looks like a request or response.
fn probe(input: &[u8], direction: u8, rdir: *mut u8) -> bool {
match parse_isakmp_header(input) {
Ok((_, isakmp_header)) => {
if isakmp_header.maj_ver == 1 {
if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
unsafe {
*rdir = STREAM_TOSERVER;
}
}
return true;
} else if isakmp_header.maj_ver == 2 {
if isakmp_header.min_ver!= 0 {
SCLogDebug!(
"ipsec_probe: could be ipsec, but with unsupported/invalid version {}.{}",
isakmp_header.maj_ver,
isakmp_header.min_ver
);
return false;
}
if isakmp_header.exch_type < 34 || isakmp_header.exch_type > 37 {
SCLogDebug!("ipsec_probe: could be ipsec, but with unsupported/invalid exchange type {}",
|
isakmp_header.exch_type);
return false;
}
if isakmp_header.length as usize!= input.len() {
SCLogDebug!("ipsec_probe: could be ipsec, but length does not match");
return false;
}
if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
unsafe {
*rdir = STREAM_TOSERVER;
}
}
return true;
}
return false;
}
Err(_) => return false,
}
}
// C exports.
export_tx_get_detect_state!(rs_ike_tx_get_detect_state, IKETransaction);
export_tx_set_detect_state!(rs_ike_tx_set_detect_state, IKETransaction);
/// C entry point for a probing parser.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_probing_parser(
_flow: *const Flow, direction: u8, input: *const u8, input_len: u32, rdir: *mut u8,
) -> AppProto {
if input_len < 28 {
// at least the ISAKMP_HEADER must be there, not ALPROTO_UNKNOWN because over UDP
return ALPROTO_FAILED;
}
if input!= std::ptr::null_mut() {
let slice = build_slice!(input, input_len as usize);
if probe(slice, direction, rdir) {
return ALPROTO_IKE ;
}
}
return ALPROTO_FAILED;
}
#[no_mangle]
pub extern "C" fn rs_ike_state_new(
_orig_state: *mut std::os::raw::c_void, _orig_proto: AppProto,
) -> *mut std::os::raw::c_void {
let state = IKEState::default();
let boxed = Box::new(state);
return Box::into_raw(boxed) as *mut _;
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_free(state: *mut std::os::raw::c_void) {
// Just unbox...
std::mem::drop(Box::from_raw(state as *mut IKEState));
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_tx_free(state: *mut std::os::raw::c_void, tx_id: u64) {
let state = cast_pointer!(state, IKEState);
state.free_tx(tx_id);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_request(
_flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
let state = cast_pointer!(state, IKEState);
let buf = build_slice!(input, input_len as usize);
return state.handle_input(buf, STREAM_TOSERVER);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_response(
_flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
let state = cast_pointer!(state, IKEState);
let buf = build_slice!(input, input_len as usize);
return state.handle_input(buf, STREAM_TOCLIENT);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx(
state: *mut std::os::raw::c_void, tx_id: u64,
) -> *mut std::os::raw::c_void {
let state = cast_pointer!(state, IKEState);
match state.get_tx(tx_id) {
Some(tx) => {
return tx as *const _ as *mut _;
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_count(state: *mut std::os::raw::c_void) -> u64 {
let state = cast_pointer!(state, IKEState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_ike_state_progress_completion_status(_direction: u8) -> std::os::raw::c_int {
// This parser uses 1 to signal transaction completion status.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_ike_tx_get_alstate_progress(
_tx: *mut std::os::raw::c_void, _direction: u8,
) -> std::os::raw::c_int {
return 1;
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_get_logged(
_state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void,
) -> u32 {
let tx = cast_pointer!(tx, IKETransaction);
return tx.logged.get();
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_set_logged(
_state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void, logged: u32,
) {
let tx = cast_pointer!(tx, IKETransaction);
tx.logged.set(logged);
}
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_events(
tx: *mut std::os::raw::c_void,
) -> *mut core::AppLayerDecoderEvents {
let tx = cast_pointer!(tx, IKETransaction);
return tx.events;
}
static mut ALPROTO_IKE : AppProto = ALPROTO_UNKNOWN;
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_iterator(
_ipproto: u8, _alproto: AppProto, state: *mut std::os::raw::c_void, min_tx_id: u64,
_max_tx_id: u64, istate: &mut u64,
) -> applayer::AppLayerGetTxIterTuple {
let state = cast_pointer!(state, IKEState);
match state.tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = tx as *const _ as *mut _;
let ires = applayer::AppLayerGetTxIterTuple::with_values(c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
// Parser name as a C style string.
const PARSER_NAME: &'static [u8] = b"ike\0";
const PARSER_ALIAS: &'static [u8] = b"ikev2\0";
export_tx_data_get!(rs_ike_get_tx_data, IKETransaction);
#[no_mangle]
pub unsafe extern "C" fn rs_ike_register_parser() {
let default_port = CString::new("500").unwrap();
let parser = RustParser {
name : PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
default_port : default_port.as_ptr(),
ipproto : core::IPPROTO_UDP,
probe_ts : Some(rs_ike_probing_parser),
probe_tc : Some(rs_ike_probing_parser),
min_depth : 0,
max_depth : 16,
state_new : rs_ike_state_new,
state_free : rs_ike_state_free,
tx_free : rs_ike_state_tx_free,
parse_ts : rs_ike_parse_request,
parse_tc : rs_ike_parse_response,
get_tx_count : rs_ike_state_get_tx_count,
get_tx : rs_ike_state_get_tx,
tx_comp_st_ts : 1,
tx_comp_st_tc : 1,
tx_get_progress : rs_ike_tx_get_alstate_progress,
get_de_state : rs_ike_tx_get_detect_state,
set_de_state : rs_ike_tx_set_detect_state,
get_events : Some(rs_ike_state_get_events),
get_eventinfo : Some(IkeEvent::get_event_info),
get_eventinfo_byid : Some(IkeEvent::get_event_info_by_id),
localstorage_new : None,
localstorage_free : None,
get_files : None,
get_tx_iterator : None,
get_tx_data : rs_ike_get_tx_data,
apply_tx_config : None,
flags : APP_LAYER_PARSER_OPT_UNIDIR_TXS,
truncate : None,
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_IKE = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
AppLayerRegisterParserAlias(
PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
PARSER_ALIAS.as_ptr() as *const std::os::raw::c_char,
);
SCLogDebug!("Rust IKE parser registered.");
} else {
SCLogDebug!("Protocol detector and parser disabled for IKE.");
}
}
|
random_line_split
|
|
ike.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// Author: Frank Honza <[email protected]>
extern crate ipsec_parser;
use self::ipsec_parser::*;
use crate::applayer;
use crate::applayer::*;
use crate::core::{
self, AppProto, Flow, ALPROTO_FAILED, ALPROTO_UNKNOWN, STREAM_TOCLIENT, STREAM_TOSERVER,
};
use crate::ike::ikev1::{handle_ikev1, IkeV1Header, Ikev1Container};
use crate::ike::ikev2::{handle_ikev2, Ikev2Container};
use crate::ike::parser::*;
use nom;
use std;
use std::collections::HashSet;
use std::ffi::CString;
/// App-layer events the IKE parser can raise; the `AppLayerEvent` derive
/// exports name/id lookup functions for the detect engine.
#[derive(AppLayerEvent)]
pub enum IkeEvent {
    MalformedData,
    NoEncryption,
    WeakCryptoEnc,
    WeakCryptoPrf,
    WeakCryptoDh,
    WeakCryptoAuth,
    WeakCryptoNoDh,
    WeakCryptoNoAuth,
    InvalidProposal,
    UnknownProposal,
    PayloadExtraData,
    MultipleServerProposal,
}
/// Version-agnostic view of an ISAKMP/IKE header plus the proposal
/// transforms seen so far; holds both the IKEv1 and IKEv2 raw headers.
pub struct IkeHeaderWrapper {
    pub spi_initiator: String,
    pub spi_responder: String,
    pub maj_ver: u8,
    pub min_ver: u8,
    pub msg_id: u32,
    pub flags: u8,
    // One inner Vec per proposal; each inner Vec lists that proposal's attributes.
    pub ikev1_transforms: Vec<Vec<SaAttribute>>,
    pub ikev2_transforms: Vec<Vec<IkeV2Transform>>,
    pub ikev1_header: IkeV1Header,
    pub ikev2_header: IkeV2Header,
}
impl IkeHeaderWrapper {
    /// Build an empty wrapper: zeroed version/id/flag fields, empty SPI
    /// strings, no transforms, and zero-initialized v1/v2 headers.
    pub fn new() -> IkeHeaderWrapper {
        // Assemble the zeroed IKEv2 header first, then the wrapper itself.
        let empty_v2_header = IkeV2Header {
            init_spi: 0,
            resp_spi: 0,
            next_payload: IkePayloadType::NoNextPayload,
            maj_ver: 0,
            min_ver: 0,
            exch_type: IkeExchangeType(0),
            flags: 0,
            msg_id: 0,
            length: 0,
        };
        IkeHeaderWrapper {
            spi_initiator: String::new(),
            spi_responder: String::new(),
            maj_ver: 0,
            min_ver: 0,
            msg_id: 0,
            flags: 0,
            ikev1_transforms: Vec::new(),
            ikev2_transforms: Vec::new(),
            ikev1_header: IkeV1Header::default(),
            ikev2_header: empty_v2_header,
        }
    }
}
/// Payload types observed in a message, kept separately for IKEv1 (raw u8
/// set) and IKEv2 (typed list).
#[derive(Default)]
pub struct IkePayloadWrapper {
    pub ikev1_payload_types: Option<HashSet<u8>>,
    pub ikev2_payload_types: Vec<IkePayloadType>,
}
/// A single IKE exchange tracked as an app-layer transaction.
pub struct IKETransaction {
    // Internal 1-based id; the C-facing APIs use 0-based ids (see get_tx/free_tx).
    tx_id: u64,
    pub ike_version: u8,
    pub hdr: IkeHeaderWrapper,
    pub payload_types: IkePayloadWrapper,
    pub notify_types: Vec<NotifyType>,
    /// errors seen during exchange
    pub errors: u32,
    // Logged-state flags, accessed via rs_ike_tx_get_logged/rs_ike_tx_set_logged.
    logged: LoggerFlags,
    // Detect-engine state owned by the C side; released in free().
    de_state: Option<*mut core::DetectEngineState>,
    // Decoder events list (C allocation); released in free().
    events: *mut core::AppLayerDecoderEvents,
    tx_data: applayer::AppLayerTxData,
}
impl IKETransaction {
    /// Create an empty transaction; the id is assigned later by
    /// `IKEState::new_tx`.
    pub fn new() -> IKETransaction {
        IKETransaction {
            tx_id: 0,
            ike_version: 0,
            hdr: IkeHeaderWrapper::new(),
            payload_types: Default::default(),
            notify_types: vec![],
            logged: LoggerFlags::new(),
            de_state: None,
            events: std::ptr::null_mut(),
            tx_data: applayer::AppLayerTxData::new(),
            errors: 0,
        }
    }

    /// Release C-side resources owned by this transaction: the decoder
    /// events list and any detect-engine state.
    pub fn free(&mut self) {
        // Idiomatic null check instead of comparing with std::ptr::null_mut().
        if !self.events.is_null() {
            core::sc_app_layer_decoder_events_free_events(&mut self.events);
        }
        if let Some(state) = self.de_state {
            core::sc_detect_engine_state_free(state);
        }
    }
}
// Ensure the C-side allocations are released when a transaction is dropped.
impl Drop for IKETransaction {
    fn drop(&mut self) {
        self.free();
    }
}
/// Per-flow IKE parser state.
#[derive(Default)]
pub struct IKEState {
    // Last internal transaction id handed out by new_tx (monotonically increasing).
    tx_id: u64,
    pub transactions: Vec<IKETransaction>,
    pub ikev1_container: Ikev1Container,
    pub ikev2_container: Ikev2Container,
}
impl IKEState {
    // Free a transaction by its external (0-based) ID.
    fn free_tx(&mut self, tx_id: u64) {
        // Internal ids are 1-based, external ids 0-based, hence the +1.
        let tx = self
            .transactions
            .iter()
            .position(|tx| tx.tx_id == tx_id + 1);
        // Idiomatic Option check instead of comparing against None.
        debug_assert!(tx.is_some());
        if let Some(idx) = tx {
            let _ = self.transactions.remove(idx);
        }
    }

    /// Look up a transaction by its external (0-based) ID.
    pub fn get_tx(&mut self, tx_id: u64) -> Option<&mut IKETransaction> {
        // Iterator `find` replaces the manual scan loop.
        self.transactions
            .iter_mut()
            .find(|tx| tx.tx_id == tx_id + 1)
    }

    /// Allocate a new transaction carrying the next internal id; the caller
    /// is responsible for storing it in `self.transactions`.
    pub fn new_tx(&mut self) -> IKETransaction {
        let mut tx = IKETransaction::new();
        self.tx_id += 1;
        tx.tx_id = self.tx_id;
        return tx;
    }

    /// Set an event. The event is set on the most recent transaction.
    pub fn set_event(&mut self, event: IkeEvent) {
        if let Some(tx) = self.transactions.last_mut() {
            let ev = event as u8;
            core::sc_app_layer_decoder_events_set_event_raw(&mut tx.events, ev);
        } else {
            SCLogDebug!(
                "IKE: trying to set event {} on non-existing transaction",
                event as u32
            );
        }
    }

    /// Parse one ISAKMP message and dispatch to the IKEv1 or IKEv2 handler.
    fn handle_input(&mut self, input: &[u8], direction: u8) -> AppLayerResult {
        // We're not interested in empty requests.
        if input.is_empty() {
            return AppLayerResult::ok();
        }
        let mut current = input;
        match parse_isakmp_header(current) {
            Ok((rem, isakmp_header)) => {
                current = rem;
                // Only major versions 1 and 2 are supported; anything else is an error.
                if isakmp_header.maj_ver != 1 && isakmp_header.maj_ver != 2 {
                    SCLogDebug!("Unsupported ISAKMP major_version");
                    return AppLayerResult::err();
                }
                if isakmp_header.maj_ver == 1 {
                    handle_ikev1(self, current, isakmp_header, direction);
                } else {
                    // maj_ver == 2, guaranteed by the check above.
                    handle_ikev2(self, current, isakmp_header, direction);
                }
                return AppLayerResult::ok(); // todo either remove outer loop or check header length-field if we have completely read everything
            }
            Err(nom::Err::Incomplete(_)) => {
                SCLogDebug!("Insufficient data while parsing IKE");
                return AppLayerResult::err();
            }
            Err(_) => {
                SCLogDebug!("Error while parsing IKE packet");
                return AppLayerResult::err();
            }
        }
    }

    /// Iterator used by the app-layer API: return the next transaction with
    /// id >= min_tx_id, the external id, and whether more follow. `state`
    /// carries the resume index between calls.
    fn tx_iterator(
        &mut self, min_tx_id: u64, state: &mut u64,
    ) -> Option<(&IKETransaction, u64, bool)> {
        let mut index = *state as usize;
        let len = self.transactions.len();
        while index < len {
            let tx = &self.transactions[index];
            if tx.tx_id < min_tx_id + 1 {
                index += 1;
                continue;
            }
            *state = index as u64;
            return Some((tx, tx.tx_id - 1, (len - index) > 1));
        }
        return None;
    }
}
/// Probe to see if this input looks like a request or response.
///
/// Returns true when the buffer parses as an ISAKMP header with a supported
/// major version; may rewrite `*rdir` to correct the flow direction.
fn probe(input: &[u8], direction: u8, rdir: *mut u8) -> bool {
    match parse_isakmp_header(input) {
        Ok((_, isakmp_header)) => {
            if isakmp_header.maj_ver == 1 {
                // A zero responder SPI with a to-client direction suggests this is
                // really the initiator's first message — flip the direction hint.
                if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
                    unsafe {
                        *rdir = STREAM_TOSERVER;
                    }
                }
                return true;
            } else if isakmp_header.maj_ver == 2 {
                // IKEv2 requires minor version 0.
                if isakmp_header.min_ver!= 0 {
                    SCLogDebug!(
                        "ipsec_probe: could be ipsec, but with unsupported/invalid version {}.{}",
                        isakmp_header.maj_ver,
                        isakmp_header.min_ver
                    );
                    return false;
                }
                // Only exchange types 34..=37 are accepted for IKEv2.
                if isakmp_header.exch_type < 34 || isakmp_header.exch_type > 37 {
                    SCLogDebug!("ipsec_probe: could be ipsec, but with unsupported/invalid exchange type {}",
                           isakmp_header.exch_type);
                    return false;
                }
                // The header's length field must match the full datagram length.
                if isakmp_header.length as usize!= input.len() {
                    SCLogDebug!("ipsec_probe: could be ipsec, but length does not match");
                    return false;
                }
                // Same direction fix-up as for IKEv1 above.
                if isakmp_header.resp_spi == 0 && direction!= STREAM_TOSERVER {
                    unsafe {
                        *rdir = STREAM_TOSERVER;
                    }
                }
                return true;
            }
            return false;
        }
        Err(_) => return false,
    }
}
// C exports.
export_tx_get_detect_state!(rs_ike_tx_get_detect_state, IKETransaction);
export_tx_set_detect_state!(rs_ike_tx_set_detect_state, IKETransaction);
/// C entry point for a probing parser.
///
/// Returns ALPROTO_IKE when the buffer looks like an ISAKMP message,
/// ALPROTO_FAILED otherwise (never ALPROTO_UNKNOWN: over UDP there is no
/// more data coming that could settle the question later).
#[no_mangle]
pub unsafe extern "C" fn rs_ike_probing_parser(
    _flow: *const Flow, direction: u8, input: *const u8, input_len: u32, rdir: *mut u8,
) -> AppProto {
    if input_len < 28 {
        // at least the ISAKMP_HEADER must be there, not ALPROTO_UNKNOWN because over UDP
        return ALPROTO_FAILED;
    }
    // Idiomatic null check instead of comparing with std::ptr::null_mut().
    if !input.is_null() {
        let slice = build_slice!(input, input_len as usize);
        if probe(slice, direction, rdir) {
            return ALPROTO_IKE;
        }
    }
    return ALPROTO_FAILED;
}
/// C entry point: allocate a fresh per-flow IKE state on the heap and hand
/// ownership of the raw pointer to the caller (freed via rs_ike_state_free).
#[no_mangle]
pub extern "C" fn rs_ike_state_new(
    _orig_state: *mut std::os::raw::c_void, _orig_proto: AppProto,
) -> *mut std::os::raw::c_void {
    Box::into_raw(Box::new(IKEState::default())) as *mut _
}
/// C entry point: reclaim ownership of the state pointer and drop it.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_free(state: *mut std::os::raw::c_void) {
    // Just unbox...
    std::mem::drop(Box::from_raw(state as *mut IKEState));
}
/// C entry point: free a single transaction by its external (0-based) id.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_tx_free(state: *mut std::os::raw::c_void, tx_id: u64) {
    let state = cast_pointer!(state, IKEState);
    state.free_tx(tx_id);
}
/// C entry point: parse to-server (request) data for this flow.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_request(
    _flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
    input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
    let state = cast_pointer!(state, IKEState);
    let buf = build_slice!(input, input_len as usize);
    return state.handle_input(buf, STREAM_TOSERVER);
}
/// C entry point: parse to-client (response) data for this flow.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_parse_response(
    _flow: *const Flow, state: *mut std::os::raw::c_void, _pstate: *mut std::os::raw::c_void,
    input: *const u8, input_len: u32, _data: *const std::os::raw::c_void, _flags: u8,
) -> AppLayerResult {
    let state = cast_pointer!(state, IKEState);
    let buf = build_slice!(input, input_len as usize);
    return state.handle_input(buf, STREAM_TOCLIENT);
}
/// C entry point: look up a transaction by its external id, returning a raw
/// pointer to it or NULL when no such transaction exists.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx(
    state: *mut std::os::raw::c_void, tx_id: u64,
) -> *mut std::os::raw::c_void {
    let state = cast_pointer!(state, IKEState);
    if let Some(tx) = state.get_tx(tx_id) {
        return tx as *const _ as *mut _;
    }
    std::ptr::null_mut()
}
/// C entry point: total number of transactions created so far for this state.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_count(state: *mut std::os::raw::c_void) -> u64 {
    let state = cast_pointer!(state, IKEState);
    return state.tx_id;
}
/// C entry point: progress value that marks a transaction as complete.
#[no_mangle]
pub extern "C" fn rs_ike_state_progress_completion_status(_direction: u8) -> std::os::raw::c_int {
    // This parser uses 1 to signal transaction completion status.
    return 1;
}
/// C entry point: per-transaction progress; IKE transactions are always
/// reported as complete (1) in both directions.
#[no_mangle]
pub extern "C" fn rs_ike_tx_get_alstate_progress(
    _tx: *mut std::os::raw::c_void, _direction: u8,
) -> std::os::raw::c_int {
    return 1;
}
/// C entry point: read the transaction's logged flags.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_get_logged(
    _state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void,
) -> u32 {
    let tx = cast_pointer!(tx, IKETransaction);
    return tx.logged.get();
}
/// C entry point: overwrite the transaction's logged flags.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_tx_set_logged(
    _state: *mut std::os::raw::c_void, tx: *mut std::os::raw::c_void, logged: u32,
) {
    let tx = cast_pointer!(tx, IKETransaction);
    tx.logged.set(logged);
}
/// C entry point: expose the transaction's decoder events list (may be NULL).
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_events(
    tx: *mut std::os::raw::c_void,
) -> *mut core::AppLayerDecoderEvents {
    let tx = cast_pointer!(tx, IKETransaction);
    return tx.events;
}
static mut ALPROTO_IKE : AppProto = ALPROTO_UNKNOWN;
/// C entry point: iterate transactions starting from `min_tx_id`; `istate`
/// carries the resume position between successive calls.
#[no_mangle]
pub unsafe extern "C" fn rs_ike_state_get_tx_iterator(
    _ipproto: u8, _alproto: AppProto, state: *mut std::os::raw::c_void, min_tx_id: u64,
    _max_tx_id: u64, istate: &mut u64,
) -> applayer::AppLayerGetTxIterTuple {
    let state = cast_pointer!(state, IKEState);
    match state.tx_iterator(min_tx_id, istate) {
        Some((tx, out_tx_id, has_next)) => {
            let c_tx = tx as *const _ as *mut _;
            let ires = applayer::AppLayerGetTxIterTuple::with_values(c_tx, out_tx_id, has_next);
            return ires;
        }
        None => {
            return applayer::AppLayerGetTxIterTuple::not_found();
        }
    }
}
// Parser name as a C style string.
const PARSER_NAME: &'static [u8] = b"ike\0";
const PARSER_ALIAS: &'static [u8] = b"ikev2\0";
export_tx_data_get!(rs_ike_get_tx_data, IKETransaction);
#[no_mangle]
pub unsafe extern "C" fn
|
() {
let default_port = CString::new("500").unwrap();
let parser = RustParser {
name : PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
default_port : default_port.as_ptr(),
ipproto : core::IPPROTO_UDP,
probe_ts : Some(rs_ike_probing_parser),
probe_tc : Some(rs_ike_probing_parser),
min_depth : 0,
max_depth : 16,
state_new : rs_ike_state_new,
state_free : rs_ike_state_free,
tx_free : rs_ike_state_tx_free,
parse_ts : rs_ike_parse_request,
parse_tc : rs_ike_parse_response,
get_tx_count : rs_ike_state_get_tx_count,
get_tx : rs_ike_state_get_tx,
tx_comp_st_ts : 1,
tx_comp_st_tc : 1,
tx_get_progress : rs_ike_tx_get_alstate_progress,
get_de_state : rs_ike_tx_get_detect_state,
set_de_state : rs_ike_tx_set_detect_state,
get_events : Some(rs_ike_state_get_events),
get_eventinfo : Some(IkeEvent::get_event_info),
get_eventinfo_byid : Some(IkeEvent::get_event_info_by_id),
localstorage_new : None,
localstorage_free : None,
get_files : None,
get_tx_iterator : None,
get_tx_data : rs_ike_get_tx_data,
apply_tx_config : None,
flags : APP_LAYER_PARSER_OPT_UNIDIR_TXS,
truncate : None,
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_IKE = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name)!= 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
AppLayerRegisterParserAlias(
PARSER_NAME.as_ptr() as *const std::os::raw::c_char,
PARSER_ALIAS.as_ptr() as *const std::os::raw::c_char,
);
SCLogDebug!("Rust IKE parser registered.");
} else {
SCLogDebug!("Protocol detector and parser disabled for IKE.");
}
}
|
rs_ike_register_parser
|
identifier_name
|
panic.rs
|
//! Panic runtime for Miri.
//!
//! The core pieces of the runtime are:
//! - An implementation of `__rust_maybe_catch_panic` that pushes the invoked stack frame with
//! some extra metadata derived from the panic-catching arguments of `__rust_maybe_catch_panic`.
//! - A hack in `libpanic_unwind` that calls the `miri_start_panic` intrinsic instead of the
//! target-native panic runtime. (This lives in the rustc repo.)
//! - An implementation of `miri_start_panic` that stores its argument (the panic payload), and then
//! immediately returns, but on the *unwind* edge (not the normal return edge), thus initiating unwinding.
//! - A hook executed each time a frame is popped, such that if the frame pushed by `__rust_maybe_catch_panic`
//! gets popped *during unwinding*, we take the panic payload and store it according to the extra
//! metadata we remembered when pushing said frame.
use rustc::mir;
use rustc::ty::{self, layout::LayoutOf};
use rustc_target::spec::PanicStrategy;
use rustc_span::source_map::Span;
use crate::*;
/// Holds all of the relevant data for a call to
/// `__rust_maybe_catch_panic`.
///
/// If a panic occurs, we update this data with
/// the information from the panic site.
#[derive(Debug)]
pub struct CatchUnwindData<'tcx> {
/// The dereferenced `data_ptr` argument passed to `__rust_maybe_catch_panic`.
pub data_place: MPlaceTy<'tcx, Tag>,
/// The dereferenced `vtable_ptr` argument passed to `__rust_maybe_catch_panic`.
pub vtable_place: MPlaceTy<'tcx, Tag>,
/// The `dest` from the original call to `__rust_maybe_catch_panic`.
pub dest: PlaceTy<'tcx, Tag>,
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx:'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Handles the special "miri_start_panic" intrinsic, which is called
/// by libpanic_unwind to delegate the actual unwinding process to Miri.
fn handle_miri_start_panic(
&mut self,
args: &[OpTy<'tcx, Tag>],
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
trace!("miri_start_panic: {:?}", this.frame().span);
|
this.machine.panic_payload.is_none(),
"the panic runtime should avoid double-panics"
);
this.machine.panic_payload = Some(scalar);
// Jump to the unwind block to begin unwinding.
this.unwind_to_block(unwind);
return Ok(());
}
    /// Implements `__rust_maybe_catch_panic`: pushes a frame that calls
    /// `f(data)` and, under `-C panic=unwind`, tags that frame so
    /// `handle_stack_pop` can catch a panic unwinding out of it and store
    /// the payload through `data_ptr`/`vtable_ptr`.
    fn handle_catch_panic(
        &mut self,
        args: &[OpTy<'tcx, Tag>],
        dest: PlaceTy<'tcx, Tag>,
        ret: mir::BasicBlock,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let tcx = &{ this.tcx.tcx };
        // fn __rust_maybe_catch_panic(
        //     f: fn(*mut u8),
        //     data: *mut u8,
        //     data_ptr: *mut usize,
        //     vtable_ptr: *mut usize,
        // ) -> u32
        // Get all the arguments.
        let f = this.read_scalar(args[0])?.not_undef()?;
        let f_arg = this.read_scalar(args[1])?.not_undef()?;
        let data_place = this.deref_operand(args[2])?;
        let vtable_place = this.deref_operand(args[3])?;
        // Now we make a function call, and pass `f_arg` as first and only argument.
        let f_instance = this.memory.get_fn(f)?.as_instance()?;
        trace!("__rust_maybe_catch_panic: {:?}", f_instance);
        // `f` returns unit, so the return place is a throwaway allocation.
        let ret_place = MPlaceTy::dangling(this.layout_of(tcx.mk_unit())?, this).into();
        this.call_function(
            f_instance,
            &[f_arg.into()],
            Some(ret_place),
            // Directly return to caller.
            StackPopCleanup::Goto { ret: Some(ret), unwind: None },
        )?;
        // We ourselves will return `0`, eventually (will be overwritten if we catch a panic).
        this.write_null(dest)?;
        // In unwind mode, we tag this frame with some extra data.
        // This lets `handle_stack_pop` (below) know that we should stop unwinding
        // when we pop this frame.
        if this.tcx.tcx.sess.panic_strategy() == PanicStrategy::Unwind {
            this.frame_mut().extra.catch_panic =
                Some(CatchUnwindData { data_place, vtable_place, dest })
        }
        return Ok(());
    }
    /// Hook executed whenever an interpreter frame is popped. If we are
    /// unwinding through a frame tagged by `handle_catch_panic`, take the
    /// stored panic payload, write it where the caller asked, set the catch
    /// function's return value to 1, and stop the unwind.
    fn handle_stack_pop(
        &mut self,
        mut extra: FrameData<'tcx>,
        unwinding: bool,
    ) -> InterpResult<'tcx, StackPopInfo> {
        let this = self.eval_context_mut();
        trace!("handle_stack_pop(extra = {:?}, unwinding = {})", extra, unwinding);
        // We only care about `catch_panic` if we're unwinding - if we're doing a normal
        // return, then we don't need to do anything special.
        let res = if let (true, Some(unwind_data)) = (unwinding, extra.catch_panic.take()) {
            // We've just popped a frame that was pushed by `__rust_maybe_catch_panic`,
            // and we are unwinding, so we should catch that.
            trace!("unwinding: found catch_panic frame during unwinding: {:?}", this.frame().span);
            // `panic_payload` now holds a `*mut (dyn Any + Send)`,
            // provided by the `miri_start_panic` intrinsic.
            // We want to split this into its consituient parts -
            // the data and vtable pointers - and store them according to
            // `unwind_data`, i.e., we store them where `__rust_maybe_catch_panic`
            // was told to put them.
            let payload = this.machine.panic_payload.take().unwrap();
            let payload = this.ref_to_mplace(payload)?;
            let payload_data_place = payload.ptr;
            let payload_vtable_place = payload.meta.unwrap_meta();
            this.write_scalar(payload_data_place, unwind_data.data_place.into())?;
            this.write_scalar(payload_vtable_place, unwind_data.vtable_place.into())?;
            // We set the return value of `__rust_maybe_catch_panic` to 1,
            // since there was a panic.
            let dest = unwind_data.dest;
            this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
            StackPopInfo::StopUnwinding
        } else {
            StackPopInfo::Normal
        };
        // Always close out the stacked-borrows call scope for this frame.
        this.memory.extra.stacked_borrows.borrow_mut().end_call(extra.call_id);
        Ok(res)
    }
    /// Handles a failed MIR `Assert` terminator by forwarding to the
    /// matching panic lang item (`panic_bounds_check` for bounds failures,
    /// plain `panic` for everything else).
    fn assert_panic(
        &mut self,
        span: Span,
        msg: &AssertMessage<'tcx>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        use rustc::mir::interpret::PanicInfo::*;
        let this = self.eval_context_mut();
        match msg {
            BoundsCheck { ref index, ref len } => {
                // Forward to `panic_bounds_check` lang item.
                // First arg: Caller location.
                let location = this.alloc_caller_location_for_span(span);
                // Second arg: index.
                let index = this.read_scalar(this.eval_operand(index, None)?)?;
                // Third arg: len.
                let len = this.read_scalar(this.eval_operand(len, None)?)?;
                // Call the lang item.
                let panic_bounds_check = this.tcx.lang_items().panic_bounds_check_fn().unwrap();
                let panic_bounds_check = ty::Instance::mono(this.tcx.tcx, panic_bounds_check);
                this.call_function(
                    panic_bounds_check,
                    &[location.ptr.into(), index.into(), len.into()],
                    None,
                    StackPopCleanup::Goto { ret: None, unwind },
                )?;
            }
            _ => {
                // Forward everything else to `panic` lang item.
                // First arg: Message.
                let msg = msg.description();
                let msg = this.allocate_str(msg, MiriMemoryKind::Env.into());
                // Call the lang item.
                let panic = this.tcx.lang_items().panic_fn().unwrap();
                let panic = ty::Instance::mono(this.tcx.tcx, panic);
                this.call_function(
                    panic,
                    &[msg.to_ref()],
                    None,
                    StackPopCleanup::Goto { ret: None, unwind },
                )?;
            }
        }
        Ok(())
    }
}
|
// Get the raw pointer stored in arg[0] (the panic payload).
let scalar = this.read_immediate(args[0])?;
assert!(
|
random_line_split
|
panic.rs
|
//! Panic runtime for Miri.
//!
//! The core pieces of the runtime are:
//! - An implementation of `__rust_maybe_catch_panic` that pushes the invoked stack frame with
//! some extra metadata derived from the panic-catching arguments of `__rust_maybe_catch_panic`.
//! - A hack in `libpanic_unwind` that calls the `miri_start_panic` intrinsic instead of the
//! target-native panic runtime. (This lives in the rustc repo.)
//! - An implementation of `miri_start_panic` that stores its argument (the panic payload), and then
//! immediately returns, but on the *unwind* edge (not the normal return edge), thus initiating unwinding.
//! - A hook executed each time a frame is popped, such that if the frame pushed by `__rust_maybe_catch_panic`
//! gets popped *during unwinding*, we take the panic payload and store it according to the extra
//! metadata we remembered when pushing said frame.
use rustc::mir;
use rustc::ty::{self, layout::LayoutOf};
use rustc_target::spec::PanicStrategy;
use rustc_span::source_map::Span;
use crate::*;
/// Holds all of the relevant data for a call to
/// `__rust_maybe_catch_panic`.
///
/// If a panic occurs, we update this data with
/// the information from the panic site.
#[derive(Debug)]
pub struct CatchUnwindData<'tcx> {
/// The dereferenced `data_ptr` argument passed to `__rust_maybe_catch_panic`.
pub data_place: MPlaceTy<'tcx, Tag>,
/// The dereferenced `vtable_ptr` argument passed to `__rust_maybe_catch_panic`.
pub vtable_place: MPlaceTy<'tcx, Tag>,
/// The `dest` from the original call to `__rust_maybe_catch_panic`.
pub dest: PlaceTy<'tcx, Tag>,
}
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx:'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Handles the special "miri_start_panic" intrinsic, which is called
/// by libpanic_unwind to delegate the actual unwinding process to Miri.
    fn handle_miri_start_panic(
        &mut self,
        args: &[OpTy<'tcx, Tag>],
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        trace!("miri_start_panic: {:?}", this.frame().span);
        // Get the raw pointer stored in arg[0] (the panic payload).
        let scalar = this.read_immediate(args[0])?;
        // There must be no payload pending from an earlier panic:
        // the runtime aborts on double-panic before reaching here.
        assert!(
            this.machine.panic_payload.is_none(),
            "the panic runtime should avoid double-panics"
        );
        // Stash the payload; `handle_stack_pop` retrieves it at the catch frame.
        this.machine.panic_payload = Some(scalar);
        // Jump to the unwind block to begin unwinding.
        this.unwind_to_block(unwind);
        return Ok(());
    }
fn
|
(
&mut self,
args: &[OpTy<'tcx, Tag>],
dest: PlaceTy<'tcx, Tag>,
ret: mir::BasicBlock,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let tcx = &{ this.tcx.tcx };
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
// data: *mut u8,
// data_ptr: *mut usize,
// vtable_ptr: *mut usize,
// ) -> u32
// Get all the arguments.
let f = this.read_scalar(args[0])?.not_undef()?;
let f_arg = this.read_scalar(args[1])?.not_undef()?;
let data_place = this.deref_operand(args[2])?;
let vtable_place = this.deref_operand(args[3])?;
// Now we make a function call, and pass `f_arg` as first and only argument.
let f_instance = this.memory.get_fn(f)?.as_instance()?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
let ret_place = MPlaceTy::dangling(this.layout_of(tcx.mk_unit())?, this).into();
this.call_function(
f_instance,
&[f_arg.into()],
Some(ret_place),
// Directly return to caller.
StackPopCleanup::Goto { ret: Some(ret), unwind: None },
)?;
// We ourselves will return `0`, eventually (will be overwritten if we catch a panic).
this.write_null(dest)?;
// In unwind mode, we tag this frame with some extra data.
// This lets `handle_stack_pop` (below) know that we should stop unwinding
// when we pop this frame.
if this.tcx.tcx.sess.panic_strategy() == PanicStrategy::Unwind {
this.frame_mut().extra.catch_panic =
Some(CatchUnwindData { data_place, vtable_place, dest })
}
return Ok(());
}
fn handle_stack_pop(
&mut self,
mut extra: FrameData<'tcx>,
unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo> {
let this = self.eval_context_mut();
trace!("handle_stack_pop(extra = {:?}, unwinding = {})", extra, unwinding);
// We only care about `catch_panic` if we're unwinding - if we're doing a normal
// return, then we don't need to do anything special.
let res = if let (true, Some(unwind_data)) = (unwinding, extra.catch_panic.take()) {
// We've just popped a frame that was pushed by `__rust_maybe_catch_panic`,
// and we are unwinding, so we should catch that.
trace!("unwinding: found catch_panic frame during unwinding: {:?}", this.frame().span);
// `panic_payload` now holds a `*mut (dyn Any + Send)`,
// provided by the `miri_start_panic` intrinsic.
// We want to split this into its consituient parts -
// the data and vtable pointers - and store them according to
// `unwind_data`, i.e., we store them where `__rust_maybe_catch_panic`
// was told to put them.
let payload = this.machine.panic_payload.take().unwrap();
let payload = this.ref_to_mplace(payload)?;
let payload_data_place = payload.ptr;
let payload_vtable_place = payload.meta.unwrap_meta();
this.write_scalar(payload_data_place, unwind_data.data_place.into())?;
this.write_scalar(payload_vtable_place, unwind_data.vtable_place.into())?;
// We set the return value of `__rust_maybe_catch_panic` to 1,
// since there was a panic.
let dest = unwind_data.dest;
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
StackPopInfo::StopUnwinding
} else {
StackPopInfo::Normal
};
this.memory.extra.stacked_borrows.borrow_mut().end_call(extra.call_id);
Ok(res)
}
fn assert_panic(
&mut self,
span: Span,
msg: &AssertMessage<'tcx>,
unwind: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
use rustc::mir::interpret::PanicInfo::*;
let this = self.eval_context_mut();
match msg {
BoundsCheck { ref index, ref len } => {
// Forward to `panic_bounds_check` lang item.
// First arg: Caller location.
let location = this.alloc_caller_location_for_span(span);
// Second arg: index.
let index = this.read_scalar(this.eval_operand(index, None)?)?;
// Third arg: len.
let len = this.read_scalar(this.eval_operand(len, None)?)?;
// Call the lang item.
let panic_bounds_check = this.tcx.lang_items().panic_bounds_check_fn().unwrap();
let panic_bounds_check = ty::Instance::mono(this.tcx.tcx, panic_bounds_check);
this.call_function(
panic_bounds_check,
&[location.ptr.into(), index.into(), len.into()],
None,
StackPopCleanup::Goto { ret: None, unwind },
)?;
}
_ => {
// Forward everything else to `panic` lang item.
// First arg: Message.
let msg = msg.description();
let msg = this.allocate_str(msg, MiriMemoryKind::Env.into());
// Call the lang item.
let panic = this.tcx.lang_items().panic_fn().unwrap();
let panic = ty::Instance::mono(this.tcx.tcx, panic);
this.call_function(
panic,
&[msg.to_ref()],
None,
StackPopCleanup::Goto { ret: None, unwind },
)?;
}
}
Ok(())
}
}
|
handle_catch_panic
|
identifier_name
|
rom_nist384_32.rs
|
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
use nist384::big::NLEN;
use super::super::arch::Chunk;
use types::{ModType, CurveType, CurvePairingType, SexticTwist, SignOfX};
// Base Bits= 29
// Base Bits= 29
// nist384 Modulus
pub const MODULUS: [Chunk; NLEN] = [
0x1FFFFFFF, 0x7, 0x0, 0x1FFFFE00, 0x1FFFEFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF,
0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x7F,
];
pub const R2MODP: [Chunk; NLEN] = [
0x0, 0x8000, 0x1FF80000, 0x1FFFFF, 0x2000000, 0x0, 0x0, 0x1FFFFFFC, 0xF, 0x100, 0x400, 0x0,
0x0, 0x0,
];
pub const MCONST: Chunk = 0x1;
// nist384 Curve
pub const CURVE_COF_I: isize = 1;
pub const CURVE_A: isize = -3;
pub const CURVE_B_I: isize = 0;
pub const CURVE_COF: [Chunk; NLEN] = [
0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
];
pub const CURVE_B: [Chunk; NLEN] = [
0x13EC2AEF, 0x142E476E, 0xBB4674A, 0xC731B14, 0x1875AC65, 0x447A809, 0x4480C50, 0xDDFD028,
0x19181D9C, 0x1F1FC168, 0x623815A, 0x47DCFC9, 0x1312FA7E, 0x59,
];
pub const CURVE_ORDER: [Chunk; NLEN] = [
0xCC52973, 0x760CB56, 0xC29DEBB, 0x141B6491, 0x12DDF581, 0x6C0FA1B, 0x1FFF1D8D, 0x1FFFFFFF,
0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x1FFFFFFF, 0x7F,
];
pub const CURVE_GX: [Chunk; NLEN] = [
0x12760AB7, 0x12A2F1C3, 0x154A5B0E, 0x5E4BB7E, 0x2A38550, 0xF0412A, 0xE6167DD, 0xC5174F3,
0x146E1D3B, 0x1799056B, 0x3AC71C7, 0x1D160A6F, 0x87CA22B, 0x55,
];
pub const CURVE_GY: [Chunk; NLEN] = [
0x10EA0E5F, 0x1218EBE4, 0x1FA0675E, 0x1639C3A, 0xB8C00A6, 0x1889DAF8, 0x11F3A768, 0x17A51342,
0x9F8F41D, 0x1C9496E1, 0x1767A62F, 0xC4C58DE, 0x17DE4A9, 0x1B,
];
pub const MODBYTES: usize = 48;
pub const BASEBITS: usize = 29;
pub const MODBITS: usize = 384;
pub const MOD8: usize = 7;
pub const MODTYPE: ModType = ModType::NOT_SPECIAL;
pub const SH: usize = 14;
pub const CURVETYPE: CurveType = CurveType::WEIERSTRASS;
pub const CURVE_PAIRING_TYPE: CurvePairingType = CurvePairingType::NOT;
pub const SEXTIC_TWIST: SexticTwist = SexticTwist::NOT;
pub const ATE_BITS: usize = 0;
|
pub const SIGN_OF_X: SignOfX = SignOfX::NOT;
pub const HASH_TYPE: usize = 48;
pub const AESKEY: usize = 24;
|
random_line_split
|
|
pkgid.rs
|
use cargo::ops;
use cargo::util::{CliResult, CliError, Config};
use cargo::util::important_paths::{find_root_manifest_for_wd};
/// Command-line options for `cargo pkgid`, decoded from the USAGE docopt
/// string below (flag_* = options, arg_* = positional arguments).
#[derive(RustcDecodable)]
struct Options {
    flag_verbose: bool,
    flag_quiet: bool,
    flag_color: Option<String>,
    flag_manifest_path: Option<String>,
    arg_spec: Option<String>,
}
pub const USAGE: &'static str = "
Print a fully qualified package specification
Usage:
cargo pkgid [options] [<spec>]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to the package to clean
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
Given a <spec> argument, print out the fully qualified package id specifier.
This command will generate an error if <spec> is ambiguous as to which package
it refers to in the dependency graph. If no <spec> is given, then the pkgid for
the local package is printed.
This command requires that a lockfile is available and dependencies have been
fetched.
Example Package IDs
pkgid | name | version | url
|-----------------------------|--------|-----------|---------------------|
foo | foo | * | *
foo:1.2.3 | foo | 1.2.3 | *
crates.io/foo | foo | * | *://crates.io/foo
crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo
crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar
http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo
";
pub fn execute(options: Options,
config: &Config) -> CliResult<Option<()>>
|
{
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_wd(options.flag_manifest_path.clone(), config.cwd()));
let spec = options.arg_spec.as_ref().map(|s| &s[..]);
let spec = try!(ops::pkgid(&root, spec, config).map_err(|err| {
CliError::from_boxed(err, 101)
}));
println!("{}", spec);
Ok(None)
}
|
identifier_body
|
|
pkgid.rs
|
use cargo::ops;
use cargo::util::{CliResult, CliError, Config};
use cargo::util::important_paths::{find_root_manifest_for_wd};
#[derive(RustcDecodable)]
struct Options {
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
|
arg_spec: Option<String>,
}
pub const USAGE: &'static str = "
Print a fully qualified package specification
Usage:
cargo pkgid [options] [<spec>]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to the package to clean
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
Given a <spec> argument, print out the fully qualified package id specifier.
This command will generate an error if <spec> is ambiguous as to which package
it refers to in the dependency graph. If no <spec> is given, then the pkgid for
the local package is printed.
This command requires that a lockfile is available and dependencies have been
fetched.
Example Package IDs
pkgid | name | version | url
|-----------------------------|--------|-----------|---------------------|
foo | foo | * | *
foo:1.2.3 | foo | 1.2.3 | *
crates.io/foo | foo | * | *://crates.io/foo
crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo
crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar
http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo
";
/// Entry point for `cargo pkgid`: resolve the optional <spec> against the
/// manifest's dependency graph and print the fully qualified package id.
pub fn execute(options: Options,
               config: &Config) -> CliResult<Option<()>> {
    try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
    try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
    // Locate the manifest, from --manifest-path or by walking up from cwd.
    let root = try!(find_root_manifest_for_wd(options.flag_manifest_path.clone(), config.cwd()));
    let spec = options.arg_spec.as_ref().map(|s| &s[..]);
    // Resolution failures (ambiguous or unknown spec) map to exit code 101.
    let spec = try!(ops::pkgid(&root, spec, config).map_err(|err| {
        CliError::from_boxed(err, 101)
    }));
    println!("{}", spec);
    Ok(None)
}
|
flag_manifest_path: Option<String>,
|
random_line_split
|
pkgid.rs
|
use cargo::ops;
use cargo::util::{CliResult, CliError, Config};
use cargo::util::important_paths::{find_root_manifest_for_wd};
#[derive(RustcDecodable)]
struct
|
{
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
flag_manifest_path: Option<String>,
arg_spec: Option<String>,
}
pub const USAGE: &'static str = "
Print a fully qualified package specification
Usage:
cargo pkgid [options] [<spec>]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to the package to clean
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
Given a <spec> argument, print out the fully qualified package id specifier.
This command will generate an error if <spec> is ambiguous as to which package
it refers to in the dependency graph. If no <spec> is given, then the pkgid for
the local package is printed.
This command requires that a lockfile is available and dependencies have been
fetched.
Example Package IDs
pkgid | name | version | url
|-----------------------------|--------|-----------|---------------------|
foo | foo | * | *
foo:1.2.3 | foo | 1.2.3 | *
crates.io/foo | foo | * | *://crates.io/foo
crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo
crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar
http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo
";
pub fn execute(options: Options,
config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_wd(options.flag_manifest_path.clone(), config.cwd()));
let spec = options.arg_spec.as_ref().map(|s| &s[..]);
let spec = try!(ops::pkgid(&root, spec, config).map_err(|err| {
CliError::from_boxed(err, 101)
}));
println!("{}", spec);
Ok(None)
}
|
Options
|
identifier_name
|
main.ng.rs
|
//! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Solver: Normal Equation
//!
//! ```
//! theta = (X' * X)^-1 * X' * y
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::Transposed;
use lines::Lines;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let ref mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let X = &*X;
let theta = timeit!("Solving the normal equation", {
(&(X.t() * X).inv() * X.t() * y).eval()
});
println!("Estimated parameters: {:?}", theta);
}
/// Loads data from a TSV file
fn
|
<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
|
load
|
identifier_name
|
main.ng.rs
|
//! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Solver: Normal Equation
//!
//! ```
//! theta = (X' * X)^-1 * X' * y
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! theta (n-by-1 matrix)
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::Transposed;
use lines::Lines;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let ref mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let X = &*X;
let theta = timeit!("Solving the normal equation", {
(&(X.t() * X).inv() * X.t() * y).eval()
});
println!("Estimated parameters: {:?}", theta);
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>>
|
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
|
{
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
|
identifier_body
|
main.ng.rs
|
//! Multivariate linear regression using gradient descent
//!
//! Model:
//!
//! ```
//! y = x * theta + e
//!
//! y Dependent variable (scalar)
//! x Independent variables (1-by-n matrix)
//! theta Parameters to estimate (n-by-1 matrix)
//! e Error (scalar)
//! ```
//!
//! Solver: Normal Equation
//!
//! ```
//! theta = (X' * X)^-1 * X' * y
//!
//! E (m-by-1 matrix)
//! X (m-by-n matrix)
//! Y (m-by-1 matrix)
//! theta (n-by-1 matrix)
|
#![feature(plugin)]
#![plugin(linalg_macros)]
extern crate cast;
extern crate env_logger;
extern crate linalg;
extern crate lines;
extern crate time;
#[macro_use]
extern crate log;
use std::fs::File;
use std::io::{BufReader, self};
use std::path::Path;
use cast::From as _0;
use linalg::prelude::*;
use linalg::Transposed;
use lines::Lines;
macro_rules! timeit {
($msg:expr, $e:expr) => {{
let now = time::precise_time_ns();
let out = $e;
let elapsed = time::precise_time_ns() - now;
println!(concat!($msg, " took {} ms"), f64::from_(elapsed) / 1_000_000.);
out
}}
}
fn main() {
env_logger::init().unwrap();
// Some dummy operation to force the initialization of OpenBLAS' runtime (~90 ms) here rather
// than during the measurements below
(&mat![1., 2.; 3., 4.].inv() * &mat![1., 2.; 3., 4.]).eval();
let data = timeit!("Loading data", {
load("mpg.tsv").unwrap()
});
// Number of observations
let m = data.nrows();
println!("{} observations", m);
// Number of independent variables
let n = data.ncols() - 1;
println!("{} independent variables\n", n);
let ref mut X = Mat::ones((m, n + 1));
X[.., 1..] = data[.., 1..];
let y = data.col(0);
let X = &*X;
let theta = timeit!("Solving the normal equation", {
(&(X.t() * X).inv() * X.t() * y).eval()
});
println!("Estimated parameters: {:?}", theta);
}
/// Loads data from a TSV file
fn load<P>(path: P) -> io::Result<Transposed<Mat<f64>>> where P: AsRef<Path> {
fn load(path: &Path) -> io::Result<Transposed<Mat<f64>>> {
let mut lines = Lines::from(BufReader::new(try!(File::open(path))));
let mut v = vec![];
let ncols = {
let mut ncols = 0;
for number in try!(lines.next().unwrap()).split_whitespace() {
ncols += 1;
v.push(number.parse().unwrap());
}
ncols
};
let mut nrows = 1;
while let Some(line) = lines.next() {
let line = try!(line);
for number in line.split_whitespace() {
v.push(number.parse().unwrap());
}
nrows += 1;
}
unsafe {
Ok(Mat::from_raw_parts(v.into_boxed_slice(), (ncols, nrows)).t())
}
}
load(path.as_ref())
}
|
//! ```
#![allow(non_snake_case)]
#![deny(warnings)]
|
random_line_split
|
parsing_utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::AsciiExt;
use cssparser::ast::{ComponentValue, Ident, Comma, SkipWhitespaceIterable, SkipWhitespaceIterator};
pub fn one_component_value<'a>(input: &'a [ComponentValue]) -> Result<&'a ComponentValue, ()> {
let mut iter = input.skip_whitespace();
match iter.next() {
Some(value) => if iter.next().is_none() { Ok(value) } else { Err(()) },
None => Err(())
}
}
pub fn get_ident_lower(component_value: &ComponentValue) -> Result<String, ()> {
match component_value {
&Ident(ref value) => Ok(value.as_slice().to_ascii_lower()),
_ => Err(()),
}
}
pub struct BufferedIter<E, I> {
iter: I,
buffer: Option<E>,
}
impl<E, I: Iterator<E>> BufferedIter<E, I> {
pub fn new(iter: I) -> BufferedIter<E, I> {
BufferedIter {
iter: iter,
buffer: None,
}
}
#[inline]
pub fn push_back(&mut self, value: E)
|
#[inline]
pub fn is_eof(&mut self) -> bool {
match self.next() {
Some(value) => {
self.push_back(value);
false
}
None => true
}
}
#[inline]
pub fn next_as_result(&mut self) -> Result<E, ()> {
match self.next() {
Some(value) => Ok(value),
None => Err(()),
}
}
}
impl<E, I: Iterator<E>> Iterator<E> for BufferedIter<E, I> {
#[inline]
fn next(&mut self) -> Option<E> {
if self.buffer.is_some() {
self.buffer.take()
}
else {
self.iter.next()
}
}
}
pub type ParserIter<'a, 'b> = &'a mut BufferedIter<&'b ComponentValue, SkipWhitespaceIterator<'b>>;
#[inline]
pub fn parse_slice_comma_separated<T>(input: &[ComponentValue],
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
parse_comma_separated(&mut BufferedIter::new(input.skip_whitespace()), parse_one)
}
#[inline]
pub fn parse_comma_separated<T>(iter: ParserIter,
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
let mut values = vec![try!(parse_one(iter))];
loop {
match iter.next() {
Some(&Comma) => values.push(try!(parse_one(iter))),
Some(_) => return Err(()),
None => return Ok(values),
}
}
}
|
{
assert!(self.buffer.is_none());
self.buffer = Some(value);
}
|
identifier_body
|
parsing_utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::AsciiExt;
use cssparser::ast::{ComponentValue, Ident, Comma, SkipWhitespaceIterable, SkipWhitespaceIterator};
pub fn one_component_value<'a>(input: &'a [ComponentValue]) -> Result<&'a ComponentValue, ()> {
let mut iter = input.skip_whitespace();
match iter.next() {
Some(value) => if iter.next().is_none() { Ok(value) } else { Err(()) },
None => Err(())
}
}
pub fn get_ident_lower(component_value: &ComponentValue) -> Result<String, ()> {
match component_value {
&Ident(ref value) => Ok(value.as_slice().to_ascii_lower()),
_ => Err(()),
}
}
pub struct BufferedIter<E, I> {
iter: I,
buffer: Option<E>,
}
impl<E, I: Iterator<E>> BufferedIter<E, I> {
pub fn new(iter: I) -> BufferedIter<E, I> {
BufferedIter {
iter: iter,
buffer: None,
}
}
#[inline]
pub fn push_back(&mut self, value: E) {
assert!(self.buffer.is_none());
self.buffer = Some(value);
}
#[inline]
pub fn is_eof(&mut self) -> bool {
match self.next() {
Some(value) => {
self.push_back(value);
false
}
None => true
}
}
#[inline]
pub fn next_as_result(&mut self) -> Result<E, ()> {
match self.next() {
Some(value) => Ok(value),
None => Err(()),
}
}
}
impl<E, I: Iterator<E>> Iterator<E> for BufferedIter<E, I> {
#[inline]
fn next(&mut self) -> Option<E> {
if self.buffer.is_some() {
self.buffer.take()
}
else {
self.iter.next()
}
|
}
}
pub type ParserIter<'a, 'b> = &'a mut BufferedIter<&'b ComponentValue, SkipWhitespaceIterator<'b>>;
#[inline]
pub fn parse_slice_comma_separated<T>(input: &[ComponentValue],
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
parse_comma_separated(&mut BufferedIter::new(input.skip_whitespace()), parse_one)
}
#[inline]
pub fn parse_comma_separated<T>(iter: ParserIter,
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
let mut values = vec![try!(parse_one(iter))];
loop {
match iter.next() {
Some(&Comma) => values.push(try!(parse_one(iter))),
Some(_) => return Err(()),
None => return Ok(values),
}
}
}
|
random_line_split
|
|
parsing_utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::AsciiExt;
use cssparser::ast::{ComponentValue, Ident, Comma, SkipWhitespaceIterable, SkipWhitespaceIterator};
pub fn one_component_value<'a>(input: &'a [ComponentValue]) -> Result<&'a ComponentValue, ()> {
let mut iter = input.skip_whitespace();
match iter.next() {
Some(value) => if iter.next().is_none() { Ok(value) } else { Err(()) },
None => Err(())
}
}
pub fn get_ident_lower(component_value: &ComponentValue) -> Result<String, ()> {
match component_value {
&Ident(ref value) => Ok(value.as_slice().to_ascii_lower()),
_ => Err(()),
}
}
pub struct BufferedIter<E, I> {
iter: I,
buffer: Option<E>,
}
impl<E, I: Iterator<E>> BufferedIter<E, I> {
pub fn new(iter: I) -> BufferedIter<E, I> {
BufferedIter {
iter: iter,
buffer: None,
}
}
#[inline]
pub fn push_back(&mut self, value: E) {
assert!(self.buffer.is_none());
self.buffer = Some(value);
}
#[inline]
pub fn is_eof(&mut self) -> bool {
match self.next() {
Some(value) => {
self.push_back(value);
false
}
None => true
}
}
#[inline]
pub fn
|
(&mut self) -> Result<E, ()> {
match self.next() {
Some(value) => Ok(value),
None => Err(()),
}
}
}
impl<E, I: Iterator<E>> Iterator<E> for BufferedIter<E, I> {
#[inline]
fn next(&mut self) -> Option<E> {
if self.buffer.is_some() {
self.buffer.take()
}
else {
self.iter.next()
}
}
}
pub type ParserIter<'a, 'b> = &'a mut BufferedIter<&'b ComponentValue, SkipWhitespaceIterator<'b>>;
#[inline]
pub fn parse_slice_comma_separated<T>(input: &[ComponentValue],
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
parse_comma_separated(&mut BufferedIter::new(input.skip_whitespace()), parse_one)
}
#[inline]
pub fn parse_comma_separated<T>(iter: ParserIter,
parse_one: |ParserIter| -> Result<T, ()>)
-> Result<Vec<T>, ()> {
let mut values = vec![try!(parse_one(iter))];
loop {
match iter.next() {
Some(&Comma) => values.push(try!(parse_one(iter))),
Some(_) => return Err(()),
None => return Ok(values),
}
}
}
|
next_as_result
|
identifier_name
|
parse.rs
|
#[macro_use]
extern crate criterion;
extern crate rand;
extern crate rs_poker;
use criterion::Criterion;
use rs_poker::holdem::RangeParser;
fn parse_ako(c: &mut Criterion) {
c.bench_function("Parse AKo", |b| {
b.iter(|| RangeParser::parse_one("AKo"));
});
}
fn
|
(c: &mut Criterion) {
c.bench_function("Parse pairs (22+)", |b| {
b.iter(|| RangeParser::parse_one("22+"));
});
}
fn parse_connectors(c: &mut Criterion) {
c.bench_function("Parse connectors (32+)", |b| {
b.iter(|| RangeParser::parse_one("32+"));
});
}
fn parse_plus(c: &mut Criterion) {
c.bench_function("Parse plus (A2+)", |b| {
b.iter(|| RangeParser::parse_one("A2+"));
});
}
criterion_group!(
benches,
parse_ako,
parse_pairs,
parse_connectors,
parse_plus
);
criterion_main!(benches);
|
parse_pairs
|
identifier_name
|
parse.rs
|
#[macro_use]
extern crate criterion;
extern crate rand;
extern crate rs_poker;
use criterion::Criterion;
use rs_poker::holdem::RangeParser;
fn parse_ako(c: &mut Criterion) {
c.bench_function("Parse AKo", |b| {
b.iter(|| RangeParser::parse_one("AKo"));
});
}
fn parse_pairs(c: &mut Criterion) {
c.bench_function("Parse pairs (22+)", |b| {
b.iter(|| RangeParser::parse_one("22+"));
});
}
fn parse_connectors(c: &mut Criterion) {
c.bench_function("Parse connectors (32+)", |b| {
b.iter(|| RangeParser::parse_one("32+"));
});
}
fn parse_plus(c: &mut Criterion) {
c.bench_function("Parse plus (A2+)", |b| {
b.iter(|| RangeParser::parse_one("A2+"));
});
}
criterion_group!(
benches,
parse_ako,
parse_pairs,
parse_connectors,
parse_plus
);
|
criterion_main!(benches);
|
random_line_split
|
|
parse.rs
|
#[macro_use]
extern crate criterion;
extern crate rand;
extern crate rs_poker;
use criterion::Criterion;
use rs_poker::holdem::RangeParser;
fn parse_ako(c: &mut Criterion) {
c.bench_function("Parse AKo", |b| {
b.iter(|| RangeParser::parse_one("AKo"));
});
}
fn parse_pairs(c: &mut Criterion)
|
fn parse_connectors(c: &mut Criterion) {
c.bench_function("Parse connectors (32+)", |b| {
b.iter(|| RangeParser::parse_one("32+"));
});
}
fn parse_plus(c: &mut Criterion) {
c.bench_function("Parse plus (A2+)", |b| {
b.iter(|| RangeParser::parse_one("A2+"));
});
}
criterion_group!(
benches,
parse_ako,
parse_pairs,
parse_connectors,
parse_plus
);
criterion_main!(benches);
|
{
c.bench_function("Parse pairs (22+)", |b| {
b.iter(|| RangeParser::parse_one("22+"));
});
}
|
identifier_body
|
rpc.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::Au;
|
use servo_arc::Arc;
use style::properties::ComputedValues;
use style::properties::longhands::overflow_x;
use webrender_api::ExternalScrollId;
/// Synchronous messages that script can send to layout.
///
/// In general, you should use messages to talk to Layout. Use the RPC interface
/// if and only if the work is
///
/// 1) read-only with respect to LayoutThreadData,
/// 2) small,
/// 3) and really needs to be fast.
pub trait LayoutRPC {
/// Requests the dimensions of the content box, as in the `getBoundingClientRect()` call.
fn content_box(&self) -> ContentBoxResponse;
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse;
/// Requests the geometry of this node. Used by APIs such as `clientTop`.
fn node_geometry(&self) -> NodeGeometryResponse;
/// Requests the scroll geometry of this node. Used by APIs such as `scrollTop`.
fn node_scroll_area(&self) -> NodeGeometryResponse;
/// Requests the scroll id of this node. Used by APIs such as `scrollTop`
fn node_scroll_id(&self) -> NodeScrollIdResponse;
/// Query layout for the resolved value of a given CSS property
fn resolved_style(&self) -> ResolvedStyleResponse;
fn offset_parent(&self) -> OffsetParentResponse;
/// Requests the styles for an element. Contains a `None` value if the element is in a `display:
/// none` subtree.
fn style(&self) -> StyleResponse;
fn text_index(&self) -> TextIndexResponse;
/// Requests the list of nodes from the given point.
fn nodes_from_point_response(&self) -> Vec<UntrustedNodeAddress>;
/// Query layout to get the inner text for a given element.
fn element_inner_text(&self) -> String;
}
pub struct ContentBoxResponse(pub Option<Rect<Au>>);
pub struct ContentBoxesResponse(pub Vec<Rect<Au>>);
pub struct NodeGeometryResponse {
pub client_rect: Rect<i32>,
}
pub struct NodeOverflowResponse(pub Option<Point2D<overflow_x::computed_value::T>>);
pub struct NodeScrollIdResponse(pub ExternalScrollId);
pub struct ResolvedStyleResponse(pub String);
#[derive(Clone)]
pub struct OffsetParentResponse {
pub node_address: Option<UntrustedNodeAddress>,
pub rect: Rect<Au>,
}
impl OffsetParentResponse {
pub fn empty() -> OffsetParentResponse {
OffsetParentResponse {
node_address: None,
rect: Rect::zero(),
}
}
}
#[derive(Clone)]
pub struct StyleResponse(pub Option<Arc<ComputedValues>>);
#[derive(Clone)]
pub struct TextIndexResponse(pub Option<usize>);
|
use euclid::{Point2D, Rect};
use script_traits::UntrustedNodeAddress;
|
random_line_split
|
rpc.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::Au;
use euclid::{Point2D, Rect};
use script_traits::UntrustedNodeAddress;
use servo_arc::Arc;
use style::properties::ComputedValues;
use style::properties::longhands::overflow_x;
use webrender_api::ExternalScrollId;
/// Synchronous messages that script can send to layout.
///
/// In general, you should use messages to talk to Layout. Use the RPC interface
/// if and only if the work is
///
/// 1) read-only with respect to LayoutThreadData,
/// 2) small,
/// 3) and really needs to be fast.
pub trait LayoutRPC {
/// Requests the dimensions of the content box, as in the `getBoundingClientRect()` call.
fn content_box(&self) -> ContentBoxResponse;
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse;
/// Requests the geometry of this node. Used by APIs such as `clientTop`.
fn node_geometry(&self) -> NodeGeometryResponse;
/// Requests the scroll geometry of this node. Used by APIs such as `scrollTop`.
fn node_scroll_area(&self) -> NodeGeometryResponse;
/// Requests the scroll id of this node. Used by APIs such as `scrollTop`
fn node_scroll_id(&self) -> NodeScrollIdResponse;
/// Query layout for the resolved value of a given CSS property
fn resolved_style(&self) -> ResolvedStyleResponse;
fn offset_parent(&self) -> OffsetParentResponse;
/// Requests the styles for an element. Contains a `None` value if the element is in a `display:
/// none` subtree.
fn style(&self) -> StyleResponse;
fn text_index(&self) -> TextIndexResponse;
/// Requests the list of nodes from the given point.
fn nodes_from_point_response(&self) -> Vec<UntrustedNodeAddress>;
/// Query layout to get the inner text for a given element.
fn element_inner_text(&self) -> String;
}
pub struct ContentBoxResponse(pub Option<Rect<Au>>);
pub struct ContentBoxesResponse(pub Vec<Rect<Au>>);
pub struct NodeGeometryResponse {
pub client_rect: Rect<i32>,
}
pub struct NodeOverflowResponse(pub Option<Point2D<overflow_x::computed_value::T>>);
pub struct NodeScrollIdResponse(pub ExternalScrollId);
pub struct ResolvedStyleResponse(pub String);
#[derive(Clone)]
pub struct OffsetParentResponse {
pub node_address: Option<UntrustedNodeAddress>,
pub rect: Rect<Au>,
}
impl OffsetParentResponse {
pub fn empty() -> OffsetParentResponse {
OffsetParentResponse {
node_address: None,
rect: Rect::zero(),
}
}
}
#[derive(Clone)]
pub struct
|
(pub Option<Arc<ComputedValues>>);
#[derive(Clone)]
pub struct TextIndexResponse(pub Option<usize>);
|
StyleResponse
|
identifier_name
|
combine.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
///////////////////////////////////////////////////////////////////////////
// # Type combining
//
// There are four type combiners: equate, sub, lub, and glb. Each
// implements the trait `Combine` and contains methods for combining
// two instances of various things and yielding a new instance. These
// combiner methods always yield a `Result<T>`. There is a lot of
// common code for these operations, implemented as default methods on
// the `Combine` trait.
//
// Each operation may have side-effects on the inference context,
// though these can be unrolled using snapshots. On success, the
// LUB/GLB operations return the appropriate bound. The Eq and Sub
// operations generally return the first operand.
//
// ## Contravariance
//
// When you are relating two things which have a contravariant
// relationship, you should use `contratys()` or `contraregions()`,
// rather than inversing the order of arguments! This is necessary
// because the order of arguments is not relevant for LUB and GLB. It
// is also useful to track which value is the "expected" value in
// terms of error reporting.
use super::bivariate::Bivariate;
use super::equate::Equate;
use super::glb::Glb;
use super::lub::Lub;
use super::sub::Sub;
use super::{InferCtxt};
use super::{MiscVariable, TypeTrace};
use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf};
use middle::ty::{TyVar};
use middle::ty::{IntType, UintType};
use middle::ty::{self, Ty};
use middle::ty_fold;
use middle::ty_fold::{TypeFolder, TypeFoldable};
use middle::ty_relate::{self, Relate, RelateResult, TypeRelation};
use util::ppaux::Repr;
use syntax::ast;
use syntax::codemap::Span;
#[derive(Clone)]
pub struct CombineFields<'a, 'tcx: 'a> {
pub infcx: &'a InferCtxt<'a, 'tcx>,
pub a_is_expected: bool,
pub trace: TypeTrace<'tcx>,
}
pub fn super_combine_tys<'a,'tcx:'a,R>(infcx: &InferCtxt<'a, 'tcx>,
relation: &mut R,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where R: TypeRelation<'a,'tcx>
{
let a_is_expected = relation.a_is_expected();
match (&a.sty, &b.sty) {
// Relate integral variables to other types
(&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => {
try!(infcx.int_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e)));
Ok(a)
}
(&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => {
unify_integral_variable(infcx, a_is_expected, v_id, IntType(v))
}
(&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => {
unify_integral_variable(infcx,!a_is_expected, v_id, IntType(v))
}
(&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => {
unify_integral_variable(infcx, a_is_expected, v_id, UintType(v))
}
(&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => {
unify_integral_variable(infcx,!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
(&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => {
try!(infcx.float_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e)));
Ok(a)
}
(&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => {
unify_float_variable(infcx, a_is_expected, v_id, v)
}
(&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => {
unify_float_variable(infcx,!a_is_expected, v_id, v)
}
// All other cases of inference are errors
(&ty::TyInfer(_), _) |
(_, &ty::TyInfer(_)) => {
Err(ty::terr_sorts(ty_relate::expected_found(relation, &a, &b)))
}
_ => {
ty_relate::super_relate_tys(relation, a, b)
}
}
}
fn unify_integral_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
vid_is_expected: bool,
vid: ty::IntVid,
val: ty::IntVarValue)
-> RelateResult<'tcx, Ty<'tcx>>
{
try!(infcx
.int_unification_table
.borrow_mut()
.unify_var_value(vid, val)
.map_err(|e| int_unification_error(vid_is_expected, e)));
match val {
IntType(v) => Ok(ty::mk_mach_int(infcx.tcx, v)),
UintType(v) => Ok(ty::mk_mach_uint(infcx.tcx, v)),
}
}
fn unify_float_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
vid_is_expected: bool,
vid: ty::FloatVid,
val: ast::FloatTy)
-> RelateResult<'tcx, Ty<'tcx>>
{
try!(infcx
.float_unification_table
.borrow_mut()
.unify_var_value(vid, val)
.map_err(|e| float_unification_error(vid_is_expected, e)));
Ok(ty::mk_mach_float(infcx.tcx, val))
}
impl<'a, 'tcx> CombineFields<'a, 'tcx> {
pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.infcx.tcx
}
pub fn switch_expected(&self) -> CombineFields<'a, 'tcx> {
CombineFields {
a_is_expected:!self.a_is_expected,
..(*self).clone()
}
}
pub fn equate(&self) -> Equate<'a, 'tcx> {
Equate::new(self.clone())
}
pub fn bivariate(&self) -> Bivariate<'a, 'tcx> {
Bivariate::new(self.clone())
}
pub fn sub(&self) -> Sub<'a, 'tcx> {
Sub::new(self.clone())
}
pub fn lub(&self) -> Lub<'a, 'tcx> {
Lub::new(self.clone())
}
pub fn glb(&self) -> Glb<'a, 'tcx> {
Glb::new(self.clone())
}
pub fn instantiate(&self,
a_ty: Ty<'tcx>,
dir: RelationDir,
b_vid: ty::TyVid)
-> RelateResult<'tcx, ()>
{
let tcx = self.infcx.tcx;
let mut stack = Vec::new();
stack.push((a_ty, dir, b_vid));
loop {
// For each turn of the loop, we extract a tuple
//
// (a_ty, dir, b_vid)
//
// to relate. Here dir is either SubtypeOf or
// SupertypeOf. The idea is that we should ensure that
// the type `a_ty` is a subtype or supertype (respectively) of the
// type to which `b_vid` is bound.
//
// If `b_vid` has not yet been instantiated with a type
// (which is always true on the first iteration, but not
// necessarily true on later iterations), we will first
// instantiate `b_vid` with a *generalized* version of
// `a_ty`. Generalization introduces other inference
// variables wherever subtyping could occur (at time of
// this writing, this means replacing free regions with
// region variables).
let (a_ty, dir, b_vid) = match stack.pop() {
None => break,
Some(e) => e,
};
debug!("instantiate(a_ty={} dir={:?} b_vid={})",
a_ty.repr(tcx),
dir,
b_vid.repr(tcx));
// Check whether `vid` has been instantiated yet. If not,
// make a generalized form of `ty` and instantiate with
// that.
let b_ty = self.infcx.type_variables.borrow().probe(b_vid);
let b_ty = match b_ty {
Some(t) => t, //...already instantiated.
None => { //...not yet instantiated:
// Generalize type if necessary.
let generalized_ty = try!(match dir {
EqTo => self.generalize(a_ty, b_vid, false),
BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true),
});
debug!("instantiate(a_ty={}, dir={:?}, \
b_vid={}, generalized_ty={})",
a_ty.repr(tcx), dir, b_vid.repr(tcx),
generalized_ty.repr(tcx));
self.infcx.type_variables
.borrow_mut()
.instantiate_and_push(
b_vid, generalized_ty, &mut stack);
generalized_ty
}
};
// The original triple was `(a_ty, dir, b_vid)` -- now we have
// resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`:
|
// the stack to get this right.
try!(match dir {
BiTo => self.bivariate().relate(&a_ty, &b_ty),
EqTo => self.equate().relate(&a_ty, &b_ty),
SubtypeOf => self.sub().relate(&a_ty, &b_ty),
SupertypeOf => self.sub().relate_with_variance(ty::Contravariant, &a_ty, &b_ty),
});
}
Ok(())
}
/// Attempts to generalize `ty` for the type variable `for_vid`. This checks for cycle -- that
/// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
/// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok`
/// otherwise.
fn generalize(&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
make_region_vars: bool)
-> RelateResult<'tcx, Ty<'tcx>>
{
let mut generalize = Generalizer {
infcx: self.infcx,
span: self.trace.origin.span(),
for_vid: for_vid,
make_region_vars: make_region_vars,
cycle_detected: false
};
let u = ty.fold_with(&mut generalize);
if generalize.cycle_detected {
Err(ty::terr_cyclic_ty)
} else {
Ok(u)
}
}
}
// Type folder used by `generalize`: copies a type, optionally replacing
// free regions with fresh inference variables, while checking whether the
// type mentions the variable currently being instantiated.
struct Generalizer<'cx, 'tcx:'cx> {
    // Inference context that fresh region variables are drawn from.
    infcx: &'cx InferCtxt<'cx, 'tcx>,
    // Span used as the origin for any region variables created.
    span: Span,
    // The type variable being instantiated; an occurrence of it in the
    // folded type constitutes a cycle.
    for_vid: ty::TyVid,
    // When true, eligible regions are replaced by fresh region variables.
    make_region_vars: bool,
    // Set during folding if `for_vid` was encountered.
    cycle_detected: bool,
}
// Folder implementing the generalization pass: `fold_ty` detects cycles
// through `for_vid` (resolving already-instantiated variables along the
// way), and `fold_region` substitutes fresh region variables where the
// kind of region and `make_region_vars` permit.
impl<'cx, 'tcx> ty_fold::TypeFolder<'tcx> for Generalizer<'cx, 'tcx> {
    fn tcx(&self) -> &ty::ctxt<'tcx> {
        self.infcx.tcx
    }
    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        // Check to see whether the type we are generalizing references
        // `vid`. At the same time, also update any type variables to
        // the values that they are bound to. This is needed to truly
        // check for cycles, but also just makes things readable.
        //
        // (In particular, you could have something like `$0 = Box<$1>`
        // where `$1` has already been instantiated with `Box<$0>`)
        match t.sty {
            ty::TyInfer(ty::TyVar(vid)) => {
                if vid == self.for_vid {
                    // The type mentions the very variable we are
                    // instantiating: record the cycle and fold to the
                    // error type so the caller can bail out.
                    self.cycle_detected = true;
                    self.tcx().types.err
                } else {
                    match self.infcx.type_variables.borrow().probe(vid) {
                        // Already-bound variable: recurse into its value
                        // so nested occurrences of `for_vid` are found.
                        Some(u) => self.fold_ty(u),
                        None => t,
                    }
                }
            }
            _ => {
                // Non-variable type: descend structurally.
                ty_fold::super_fold_ty(self, t)
            }
        }
    }
    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
        match r {
            // Never make variables for regions bound within the type itself.
            ty::ReLateBound(..) => { return r; }
            // Early-bound regions should really have been substituted away before
            // we get to this point.
            ty::ReEarlyBound(..) => {
                self.tcx().sess.span_bug(
                    self.span,
                    &format!("Encountered early bound region when generalizing: {}",
                             r.repr(self.tcx())));
            }
            // Always make a fresh region variable for skolemized regions;
            // the higher-ranked decision procedures rely on this.
            ty::ReInfer(ty::ReSkolemized(..)) => { }
            // For anything else, we make a region variable, unless we
            // are *equating*, in which case it's just wasteful.
            ty::ReEmpty |
            ty::ReStatic |
            ty::ReScope(..) |
            ty::ReInfer(ty::ReVar(..)) |
            ty::ReFree(..) => {
                if !self.make_region_vars {
                    return r;
                }
            }
        }
        // Falling through: replace `r` with a fresh region variable.
        // FIXME: This is non-ideal because we don't give a
        // very descriptive origin for this region variable.
        self.infcx.next_region_var(MiscVariable(self.span))
    }
}
/// Helper trait for checking the value carried by a `RelateResult`
/// against an expected value, turning a mismatch into a caller-supplied
/// type error.
pub trait RelateResultCompare<'tcx, T> {
    /// Returns a copy of `self` when it holds `Ok` of a value equal to
    /// `t` (or when it is already an `Err`, which is propagated);
    /// otherwise returns `Err(f())`.
    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
        F: FnOnce() -> ty::type_err<'tcx>;
}
impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
    /// Compares the success value against `t`: an `Ok` equal to `t` and
    /// any pre-existing `Err` are returned by cloning `self`; an `Ok`
    /// holding a different value becomes `Err(f())`.
    fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
        F: FnOnce() -> ty::type_err<'tcx>,
    {
        match *self {
            Ok(ref v) if *v == t => self.clone(),
            Ok(_) => Err(f()),
            Err(_) => self.clone(),
        }
    }
}
/// Builds the `type_err` reported when two integral-variable values
/// fail to unify, ordering expected/found by `a_is_expected`.
fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
                               -> ty::type_err<'tcx>
{
    ty::terr_int_mismatch(ty_relate::expected_found_bool(a_is_expected, &v.0, &v.1))
}
/// Builds the `type_err` reported when two float-variable values fail
/// to unify, ordering expected/found by `a_is_expected`.
fn float_unification_error<'tcx>(a_is_expected: bool,
                                 v: (ast::FloatTy, ast::FloatTy))
                                 -> ty::type_err<'tcx>
{
    ty::terr_float_mismatch(ty_relate::expected_found_bool(a_is_expected, &v.0, &v.1))
}
|
//
// FIXME(#16847): This code is non-ideal because all these subtype
// relations wind up attributed to the same spans. We need
// to associate causes/spans with each of the relations in
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.