file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs | //! If an extern token is provided, then this pass validates that
//! terminal IDs have conversions. Otherwise, it generates a
//! tokenizer. This can only be done after macro expansion because
//! some macro arguments never make it into an actual production and
//! are only used in `if` conditions; we use string literals for
//! those, but they do not have to have a defined conversion.
use super::{NormResult, NormError};
use intern::{self, intern};
use lexer::re;
use lexer::dfa::{self, DFAConstructionError, Precedence};
use lexer::nfa::NFAConstructionError::*;
use grammar::consts::*;
use grammar::parse_tree::*;
use collections::Set;
use collections::{map, Map};
#[cfg(test)]
mod test;
pub fn validate(mut grammar: Grammar) -> NormResult<Grammar> {
let (has_enum_token, all_literals) = {
let opt_enum_token = grammar.enum_token();
let conversions = opt_enum_token.map(|et| {
et.conversions.iter()
.map(|conversion| conversion.from)
.collect()
});
let mut validator = Validator {
grammar: &grammar,
all_literals: map(),
conversions: conversions,
};
try!(validator.validate());
(opt_enum_token.is_some(), validator.all_literals)
};
if!has_enum_token {
try!(construct(&mut grammar, all_literals));
}
Ok(grammar)
}
///////////////////////////////////////////////////////////////////////////
// Validation phase -- this phase walks the grammar and visits all
// terminals. If using an external set of tokens, it checks that all
// terminals have a defined conversion to some pattern. Otherwise,
// it collects all terminals into the `all_literals` set for later use.
struct Validator<'grammar> {
grammar: &'grammar Grammar,
all_literals: Map<TerminalLiteral, Span>,
conversions: Option<Set<TerminalString>>,
}
impl<'grammar> Validator<'grammar> {
fn validate(&mut self) -> NormResult<()> {
for item in &self.grammar.items {
match *item {
GrammarItem::Use(..) => { }
GrammarItem::ExternToken(_) => { }
GrammarItem::InternToken(_) => { }
GrammarItem::Nonterminal(ref data) => {
for alternative in &data.alternatives {
try!(self.validate_alternative(alternative));
}
}
}
}
Ok(())
}
fn validate_alternative(&mut self, alternative: &Alternative) -> NormResult<()> {
assert!(alternative.condition.is_none()); // macro expansion should have removed these
try!(self.validate_expr(&alternative.expr));
Ok(())
}
fn validate_expr(&mut self, expr: &ExprSymbol) -> NormResult<()> {
for symbol in &expr.symbols {
try!(self.validate_symbol(symbol));
}
Ok(())
}
fn validate_symbol(&mut self, symbol: &Symbol) -> NormResult<()> {
match symbol.kind {
SymbolKind::Expr(ref expr) => {
try!(self.validate_expr(expr));
}
SymbolKind::Terminal(term) => {
try!(self.validate_terminal(symbol.span, term));
}
SymbolKind::Nonterminal(_) => {
}
SymbolKind::Repeat(ref repeat) => {
try!(self.validate_symbol(&repeat.symbol));
}
SymbolKind::Choose(ref sym) | SymbolKind::Name(_, ref sym) => {
try!(self.validate_symbol(sym));
}
SymbolKind::Lookahead | SymbolKind::Lookbehind => {
}
SymbolKind::AmbiguousId(id) => {
panic!("ambiguous id `{}` encountered after name resolution", id)
}
SymbolKind::Macro(..) => {
panic!("macro not removed: {:?}", symbol);
}
}
Ok(())
}
fn validate_terminal(&mut self, span: Span, term: TerminalString) -> NormResult<()> {
match self.conversions {
// If there is an extern token definition, validate that
// this terminal has a defined conversion.
Some(ref c) => {
if!c.contains(&term) {
return_err!(span, "terminal `{}` does not have a pattern defined for it",
term);
}
}
// If there is no extern token definition, then collect
// the terminal literals ("class", r"[a-z]+") into a set.
None => match term {
TerminalString::Bare(c) => {
// Bare identifiers like `x` can never be resolved
// as terminals unless there is a conversion | TerminalString::Literal(l) => {
self.all_literals.entry(l).or_insert(span);
}
}
}
Ok(())
}
}
///////////////////////////////////////////////////////////////////////////
// Construction phase -- if we are constructing a tokenizer, this
// phase builds up an internal token DFA.
pub fn construct(grammar: &mut Grammar, literals_map: Map<TerminalLiteral, Span>) -> NormResult<()> {
let literals: Vec<TerminalLiteral> =
literals_map.keys()
.cloned()
.collect();
// Build up two vectors, one of parsed regular expressions and
// one of precedences, that are parallel with `literals`.
let mut regexs = Vec::with_capacity(literals.len());
let mut precedences = Vec::with_capacity(literals.len());
try!(intern::read(|interner| {
for &literal in &literals {
match literal {
TerminalLiteral::Quoted(s) => {
precedences.push(Precedence(1));
regexs.push(re::parse_literal(interner.data(s)));
}
TerminalLiteral::Regex(s) => {
precedences.push(Precedence(0));
match re::parse_regex(interner.data(s)) {
Ok(regex) => regexs.push(regex),
Err(error) => {
let literal_span = literals_map[&literal];
// FIXME -- take offset into account for
// span; this requires knowing how many #
// the user used, which we do not track
return_err!(
literal_span,
"invalid regular expression: {}",
error);
}
}
}
}
}
Ok(())
}));
let dfa = match dfa::build_dfa(®exs, &precedences) {
Ok(dfa) => dfa,
Err(DFAConstructionError::NFAConstructionError { index, error }) => {
let feature = match error {
NamedCaptures => r#"named captures (`(?P<foo>...)`)"#,
NonGreedy => r#""non-greedy" repetitions (`*?` or `+?`)"#,
WordBoundary => r#"word boundaries (`\b` or `\B`)"#,
LineBoundary => r#"line boundaries (`^` or `$`)"#,
TextBoundary => r#"text boundaries (`^` or `$`)"#,
};
let literal = literals[index.index()];
let span = literals_map[&literal];
return_err!(
span,
"{} are not supported in regular expressions",
feature)
}
Err(DFAConstructionError::Ambiguity { match0, match1 }) => {
let literal0 = literals[match0.index()];
let literal1 = literals[match1.index()];
let span0 = literals_map[&literal0];
let _span1 = literals_map[&literal1];
// FIXME(#88) -- it'd be nice to give an example here
return_err!(
span0,
"ambiguity detected between the terminal `{}` and the terminal `{}`",
literal0, literal1);
}
};
grammar.items.push(GrammarItem::InternToken(InternToken {
literals: literals,
dfa: dfa
}));
// we need to inject a `'input` lifetime and `input: &'input str` parameter as well:
let input_lifetime = intern(INPUT_LIFETIME);
for parameter in &grammar.type_parameters {
match *parameter {
TypeParameter::Lifetime(i) if i == input_lifetime => {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `'input` lifetime is implicit and cannot be declared");
}
_ => { }
}
}
let input_parameter = intern(INPUT_PARAMETER);
for parameter in &grammar.parameters {
if parameter.name == input_parameter {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `input` parameter is implicit and cannot be declared");
}
}
grammar.type_parameters.insert(0, TypeParameter::Lifetime(input_lifetime));
let parameter = Parameter {
name: input_parameter,
ty: TypeRef::Ref {
lifetime: Some(input_lifetime),
mutable: false,
referent: Box::new(TypeRef::Id(intern("str")))
}
};
grammar.parameters.push(parameter);
Ok(())
} | // defined for them that indicates they are a
// terminal; otherwise it's just an unresolved
// identifier.
panic!("bare literal `{}` without extern token definition", c);
} | random_line_split |
mod.rs | //! If an extern token is provided, then this pass validates that
//! terminal IDs have conversions. Otherwise, it generates a
//! tokenizer. This can only be done after macro expansion because
//! some macro arguments never make it into an actual production and
//! are only used in `if` conditions; we use string literals for
//! those, but they do not have to have a defined conversion.
use super::{NormResult, NormError};
use intern::{self, intern};
use lexer::re;
use lexer::dfa::{self, DFAConstructionError, Precedence};
use lexer::nfa::NFAConstructionError::*;
use grammar::consts::*;
use grammar::parse_tree::*;
use collections::Set;
use collections::{map, Map};
#[cfg(test)]
mod test;
pub fn validate(mut grammar: Grammar) -> NormResult<Grammar> {
let (has_enum_token, all_literals) = {
let opt_enum_token = grammar.enum_token();
let conversions = opt_enum_token.map(|et| {
et.conversions.iter()
.map(|conversion| conversion.from)
.collect()
});
let mut validator = Validator {
grammar: &grammar,
all_literals: map(),
conversions: conversions,
};
try!(validator.validate());
(opt_enum_token.is_some(), validator.all_literals)
};
if!has_enum_token {
try!(construct(&mut grammar, all_literals));
}
Ok(grammar)
}
///////////////////////////////////////////////////////////////////////////
// Validation phase -- this phase walks the grammar and visits all
// terminals. If using an external set of tokens, it checks that all
// terminals have a defined conversion to some pattern. Otherwise,
// it collects all terminals into the `all_literals` set for later use.
struct | <'grammar> {
grammar: &'grammar Grammar,
all_literals: Map<TerminalLiteral, Span>,
conversions: Option<Set<TerminalString>>,
}
impl<'grammar> Validator<'grammar> {
fn validate(&mut self) -> NormResult<()> {
for item in &self.grammar.items {
match *item {
GrammarItem::Use(..) => { }
GrammarItem::ExternToken(_) => { }
GrammarItem::InternToken(_) => { }
GrammarItem::Nonterminal(ref data) => {
for alternative in &data.alternatives {
try!(self.validate_alternative(alternative));
}
}
}
}
Ok(())
}
fn validate_alternative(&mut self, alternative: &Alternative) -> NormResult<()> {
assert!(alternative.condition.is_none()); // macro expansion should have removed these
try!(self.validate_expr(&alternative.expr));
Ok(())
}
fn validate_expr(&mut self, expr: &ExprSymbol) -> NormResult<()> {
for symbol in &expr.symbols {
try!(self.validate_symbol(symbol));
}
Ok(())
}
fn validate_symbol(&mut self, symbol: &Symbol) -> NormResult<()> {
match symbol.kind {
SymbolKind::Expr(ref expr) => {
try!(self.validate_expr(expr));
}
SymbolKind::Terminal(term) => {
try!(self.validate_terminal(symbol.span, term));
}
SymbolKind::Nonterminal(_) => {
}
SymbolKind::Repeat(ref repeat) => {
try!(self.validate_symbol(&repeat.symbol));
}
SymbolKind::Choose(ref sym) | SymbolKind::Name(_, ref sym) => {
try!(self.validate_symbol(sym));
}
SymbolKind::Lookahead | SymbolKind::Lookbehind => {
}
SymbolKind::AmbiguousId(id) => {
panic!("ambiguous id `{}` encountered after name resolution", id)
}
SymbolKind::Macro(..) => {
panic!("macro not removed: {:?}", symbol);
}
}
Ok(())
}
fn validate_terminal(&mut self, span: Span, term: TerminalString) -> NormResult<()> {
match self.conversions {
// If there is an extern token definition, validate that
// this terminal has a defined conversion.
Some(ref c) => {
if!c.contains(&term) {
return_err!(span, "terminal `{}` does not have a pattern defined for it",
term);
}
}
// If there is no extern token definition, then collect
// the terminal literals ("class", r"[a-z]+") into a set.
None => match term {
TerminalString::Bare(c) => {
// Bare identifiers like `x` can never be resolved
// as terminals unless there is a conversion
// defined for them that indicates they are a
// terminal; otherwise it's just an unresolved
// identifier.
panic!("bare literal `{}` without extern token definition", c);
}
TerminalString::Literal(l) => {
self.all_literals.entry(l).or_insert(span);
}
}
}
Ok(())
}
}
///////////////////////////////////////////////////////////////////////////
// Construction phase -- if we are constructing a tokenizer, this
// phase builds up an internal token DFA.
pub fn construct(grammar: &mut Grammar, literals_map: Map<TerminalLiteral, Span>) -> NormResult<()> {
let literals: Vec<TerminalLiteral> =
literals_map.keys()
.cloned()
.collect();
// Build up two vectors, one of parsed regular expressions and
// one of precedences, that are parallel with `literals`.
let mut regexs = Vec::with_capacity(literals.len());
let mut precedences = Vec::with_capacity(literals.len());
try!(intern::read(|interner| {
for &literal in &literals {
match literal {
TerminalLiteral::Quoted(s) => {
precedences.push(Precedence(1));
regexs.push(re::parse_literal(interner.data(s)));
}
TerminalLiteral::Regex(s) => {
precedences.push(Precedence(0));
match re::parse_regex(interner.data(s)) {
Ok(regex) => regexs.push(regex),
Err(error) => {
let literal_span = literals_map[&literal];
// FIXME -- take offset into account for
// span; this requires knowing how many #
// the user used, which we do not track
return_err!(
literal_span,
"invalid regular expression: {}",
error);
}
}
}
}
}
Ok(())
}));
let dfa = match dfa::build_dfa(®exs, &precedences) {
Ok(dfa) => dfa,
Err(DFAConstructionError::NFAConstructionError { index, error }) => {
let feature = match error {
NamedCaptures => r#"named captures (`(?P<foo>...)`)"#,
NonGreedy => r#""non-greedy" repetitions (`*?` or `+?`)"#,
WordBoundary => r#"word boundaries (`\b` or `\B`)"#,
LineBoundary => r#"line boundaries (`^` or `$`)"#,
TextBoundary => r#"text boundaries (`^` or `$`)"#,
};
let literal = literals[index.index()];
let span = literals_map[&literal];
return_err!(
span,
"{} are not supported in regular expressions",
feature)
}
Err(DFAConstructionError::Ambiguity { match0, match1 }) => {
let literal0 = literals[match0.index()];
let literal1 = literals[match1.index()];
let span0 = literals_map[&literal0];
let _span1 = literals_map[&literal1];
// FIXME(#88) -- it'd be nice to give an example here
return_err!(
span0,
"ambiguity detected between the terminal `{}` and the terminal `{}`",
literal0, literal1);
}
};
grammar.items.push(GrammarItem::InternToken(InternToken {
literals: literals,
dfa: dfa
}));
// we need to inject a `'input` lifetime and `input: &'input str` parameter as well:
let input_lifetime = intern(INPUT_LIFETIME);
for parameter in &grammar.type_parameters {
match *parameter {
TypeParameter::Lifetime(i) if i == input_lifetime => {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `'input` lifetime is implicit and cannot be declared");
}
_ => { }
}
}
let input_parameter = intern(INPUT_PARAMETER);
for parameter in &grammar.parameters {
if parameter.name == input_parameter {
return_err!(
grammar.span,
"since there is no external token enum specified, \
the `input` parameter is implicit and cannot be declared");
}
}
grammar.type_parameters.insert(0, TypeParameter::Lifetime(input_lifetime));
let parameter = Parameter {
name: input_parameter,
ty: TypeRef::Ref {
lifetime: Some(input_lifetime),
mutable: false,
referent: Box::new(TypeRef::Id(intern("str")))
}
};
grammar.parameters.push(parameter);
Ok(())
}
| Validator | identifier_name |
protocol.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Low-level wire protocol implementation. Currently only supports
//! [JSON packets]
//! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets).
use rustc_serialize::json::Json;
use rustc_serialize::json::ParserError::{IoError, SyntaxError};
use rustc_serialize::{json, Encodable};
use std::error::Error;
use std::io::{Read, Write};
use std::net::TcpStream;
pub trait JsonPacketStream {
fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T);
fn read_json_packet(&mut self) -> Result<Option<Json>, String>;
}
impl JsonPacketStream for TcpStream {
fn | <'a, T: Encodable>(&mut self, obj: &T) {
let s = json::encode(obj).unwrap().replace("__type__", "type");
println!("<- {}", s);
self.write_all(s.len().to_string().as_bytes()).unwrap();
self.write_all(&[':' as u8]).unwrap();
self.write_all(s.as_bytes()).unwrap();
}
fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String> {
// https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport
// In short, each JSON packet is [ascii length]:[JSON data of given length]
let mut buffer = vec!();
loop {
let mut buf = [0];
let byte = match self.read(&mut buf) {
Ok(0) => return Ok(None), // EOF
Ok(1) => buf[0],
Ok(_) => unreachable!(),
Err(e) => return Err(e.description().to_string()),
};
match byte {
b':' => {
let packet_len_str = match String::from_utf8(buffer) {
Ok(packet_len) => packet_len,
Err(_) => return Err("nonvalid UTF8 in packet length".to_string()),
};
let packet_len = match u64::from_str_radix(&packet_len_str, 10) {
Ok(packet_len) => packet_len,
Err(_) => return Err("packet length missing / not parsable".to_string()),
};
let mut packet = String::new();
self.take(packet_len).read_to_string(&mut packet).unwrap();
println!("{}", packet);
return match Json::from_str(&packet) {
Ok(json) => Ok(Some(json)),
Err(err) => match err {
IoError(ioerr) => return Err(ioerr.description().to_string()),
SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)),
},
};
},
c => buffer.push(c),
}
}
}
}
| write_json_packet | identifier_name |
protocol.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this | //! Low-level wire protocol implementation. Currently only supports
//! [JSON packets]
//! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets).
use rustc_serialize::json::Json;
use rustc_serialize::json::ParserError::{IoError, SyntaxError};
use rustc_serialize::{json, Encodable};
use std::error::Error;
use std::io::{Read, Write};
use std::net::TcpStream;
pub trait JsonPacketStream {
fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T);
fn read_json_packet(&mut self) -> Result<Option<Json>, String>;
}
impl JsonPacketStream for TcpStream {
fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T) {
let s = json::encode(obj).unwrap().replace("__type__", "type");
println!("<- {}", s);
self.write_all(s.len().to_string().as_bytes()).unwrap();
self.write_all(&[':' as u8]).unwrap();
self.write_all(s.as_bytes()).unwrap();
}
fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String> {
// https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport
// In short, each JSON packet is [ascii length]:[JSON data of given length]
let mut buffer = vec!();
loop {
let mut buf = [0];
let byte = match self.read(&mut buf) {
Ok(0) => return Ok(None), // EOF
Ok(1) => buf[0],
Ok(_) => unreachable!(),
Err(e) => return Err(e.description().to_string()),
};
match byte {
b':' => {
let packet_len_str = match String::from_utf8(buffer) {
Ok(packet_len) => packet_len,
Err(_) => return Err("nonvalid UTF8 in packet length".to_string()),
};
let packet_len = match u64::from_str_radix(&packet_len_str, 10) {
Ok(packet_len) => packet_len,
Err(_) => return Err("packet length missing / not parsable".to_string()),
};
let mut packet = String::new();
self.take(packet_len).read_to_string(&mut packet).unwrap();
println!("{}", packet);
return match Json::from_str(&packet) {
Ok(json) => Ok(Some(json)),
Err(err) => match err {
IoError(ioerr) => return Err(ioerr.description().to_string()),
SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)),
},
};
},
c => buffer.push(c),
}
}
}
} | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
| random_line_split |
protocol.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Low-level wire protocol implementation. Currently only supports
//! [JSON packets]
//! (https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport#JSON_Packets).
use rustc_serialize::json::Json;
use rustc_serialize::json::ParserError::{IoError, SyntaxError};
use rustc_serialize::{json, Encodable};
use std::error::Error;
use std::io::{Read, Write};
use std::net::TcpStream;
pub trait JsonPacketStream {
fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T);
fn read_json_packet(&mut self) -> Result<Option<Json>, String>;
}
impl JsonPacketStream for TcpStream {
fn write_json_packet<'a, T: Encodable>(&mut self, obj: &T) |
fn read_json_packet<'a>(&mut self) -> Result<Option<Json>, String> {
// https://wiki.mozilla.org/Remote_Debugging_Protocol_Stream_Transport
// In short, each JSON packet is [ascii length]:[JSON data of given length]
let mut buffer = vec!();
loop {
let mut buf = [0];
let byte = match self.read(&mut buf) {
Ok(0) => return Ok(None), // EOF
Ok(1) => buf[0],
Ok(_) => unreachable!(),
Err(e) => return Err(e.description().to_string()),
};
match byte {
b':' => {
let packet_len_str = match String::from_utf8(buffer) {
Ok(packet_len) => packet_len,
Err(_) => return Err("nonvalid UTF8 in packet length".to_string()),
};
let packet_len = match u64::from_str_radix(&packet_len_str, 10) {
Ok(packet_len) => packet_len,
Err(_) => return Err("packet length missing / not parsable".to_string()),
};
let mut packet = String::new();
self.take(packet_len).read_to_string(&mut packet).unwrap();
println!("{}", packet);
return match Json::from_str(&packet) {
Ok(json) => Ok(Some(json)),
Err(err) => match err {
IoError(ioerr) => return Err(ioerr.description().to_string()),
SyntaxError(_, l, c) => return Err(format!("syntax at {}:{}", l, c)),
},
};
},
c => buffer.push(c),
}
}
}
}
| {
let s = json::encode(obj).unwrap().replace("__type__", "type");
println!("<- {}", s);
self.write_all(s.len().to_string().as_bytes()).unwrap();
self.write_all(&[':' as u8]).unwrap();
self.write_all(s.as_bytes()).unwrap();
} | identifier_body |
main.rs | #![feature(iter_arith)]
#[macro_use] extern crate libeuler;
// Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over
// five-thousand first names, begin by sorting it into alphabetical order. Then working out the
// alphabetical value for each name, multiply this value by its alphabetical position in the list
// to obtain a name score.
//
// For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12
// + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 =
// 49714.
//
// What is the total of all the name scores in the file?
fn main() { |
let mut names: Vec<&str> = include_str!("names.txt")
.split(",")
.map(|a| a.trim_matches(&['"'] as &[char]))
.collect();
names.sort();
solutions! {
sol naive {
names.iter().zip(0..names.len()).map(|(&name, index)| {
name.chars()
.map(|c| c as u64 - 'A' as u64 + 1)
.sum::<u64>() * (index as u64 + 1)
}).sum::<u64>()
}
}
}
| identifier_body |
|
main.rs | #![feature(iter_arith)]
#[macro_use] extern crate libeuler;
| // to obtain a name score.
//
// For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12
// + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 =
// 49714.
//
// What is the total of all the name scores in the file?
fn main() {
let mut names: Vec<&str> = include_str!("names.txt")
.split(",")
.map(|a| a.trim_matches(&['"'] as &[char]))
.collect();
names.sort();
solutions! {
sol naive {
names.iter().zip(0..names.len()).map(|(&name, index)| {
name.chars()
.map(|c| c as u64 - 'A' as u64 + 1)
.sum::<u64>() * (index as u64 + 1)
}).sum::<u64>()
}
}
} | // Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over
// five-thousand first names, begin by sorting it into alphabetical order. Then working out the
// alphabetical value for each name, multiply this value by its alphabetical position in the list | random_line_split |
main.rs | #![feature(iter_arith)]
#[macro_use] extern crate libeuler;
// Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over
// five-thousand first names, begin by sorting it into alphabetical order. Then working out the
// alphabetical value for each name, multiply this value by its alphabetical position in the list
// to obtain a name score.
//
// For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12
// + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 =
// 49714.
//
// What is the total of all the name scores in the file?
fn m | ) {
let mut names: Vec<&str> = include_str!("names.txt")
.split(",")
.map(|a| a.trim_matches(&['"'] as &[char]))
.collect();
names.sort();
solutions! {
sol naive {
names.iter().zip(0..names.len()).map(|(&name, index)| {
name.chars()
.map(|c| c as u64 - 'A' as u64 + 1)
.sum::<u64>() * (index as u64 + 1)
}).sum::<u64>()
}
}
}
| ain( | identifier_name |
bytes.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* The compiler code necessary to support the bytes! extension. */
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
pub fn expand_syntax_ext<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
cx.span_warn(sp, "`bytes!` is deprecated, use `b\"foo\"` literals instead");
cx.parse_sess.span_diagnostic.span_note(sp,
"see http://doc.rust-lang.org/reference.html#byte-and-byte-string-literals \
for documentation");
cx.parse_sess.span_diagnostic.span_note(sp,
"see https://github.com/rust-lang/rust/blob/master/src/etc/2014-06-rewrite-bytes-macros.py \
for an automated migration");
// Gather all argument expressions
let exprs = match get_exprs_from_tts(cx, sp, tts) {
None => return DummyResult::expr(sp),
Some(e) => e,
};
let mut bytes = Vec::new();
let mut err = false;
for expr in exprs.iter() {
match expr.node {
// expression is a literal
ast::ExprLit(ref lit) => match lit.node {
// string literal, push each byte to vector expression
ast::LitStr(ref s, _) => {
for byte in s.get().bytes() {
bytes.push(cx.expr_u8(expr.span, byte));
}
}
// u8 literal, push to vector expression
ast::LitInt(v, ast::UnsignedIntLit(ast::TyU8)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large u8 literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// integer literal, push to vector expression
ast::LitInt(_, ast::UnsuffixedIntLit(ast::Minus)) => {
cx.span_err(expr.span, "negative integer literal in bytes!");
err = true;
}
ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large integer literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// char literal, push to vector expression
ast::LitChar(v) => |
_ => {
cx.span_err(expr.span, "unsupported literal in bytes!");
err = true;
}
},
_ => {
cx.span_err(expr.span, "non-literal in bytes!");
err = true;
}
}
}
// For some reason using quote_expr!() here aborts if we threw an error.
// I'm assuming that the end of the recursive parse tricks the compiler
// into thinking this is a good time to stop. But we'd rather keep going.
if err {
// Since the compiler will stop after the macro expansion phase anyway, we
// don't need type info, so we can just return a DummyResult
return DummyResult::expr(sp);
}
let len = bytes.len();
let e = cx.expr_vec(sp, bytes);
let ty = cx.ty(sp, ast::TyFixedLengthVec(cx.ty_ident(sp, cx.ident_of("u8")),
cx.expr_uint(sp, len)));
let item = cx.item_static(sp, cx.ident_of("BYTES"), ty, ast::MutImmutable, e);
let ret = cx.expr_ident(sp, cx.ident_of("BYTES"));
let ret = cx.expr_addr_of(sp, ret);
let e = cx.expr_block(cx.block(sp, vec![cx.stmt_item(sp, item)],
Some(ret)));
MacExpr::new(e)
}
| {
if v.is_ascii() {
bytes.push(cx.expr_u8(expr.span, v as u8));
} else {
cx.span_err(expr.span, "non-ascii char literal in bytes!");
err = true;
}
} | conditional_block |
bytes.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* The compiler code necessary to support the bytes! extension. */
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
pub fn expand_syntax_ext<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> | ast::ExprLit(ref lit) => match lit.node {
// string literal, push each byte to vector expression
ast::LitStr(ref s, _) => {
for byte in s.get().bytes() {
bytes.push(cx.expr_u8(expr.span, byte));
}
}
// u8 literal, push to vector expression
ast::LitInt(v, ast::UnsignedIntLit(ast::TyU8)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large u8 literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// integer literal, push to vector expression
ast::LitInt(_, ast::UnsuffixedIntLit(ast::Minus)) => {
cx.span_err(expr.span, "negative integer literal in bytes!");
err = true;
}
ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large integer literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// char literal, push to vector expression
ast::LitChar(v) => {
if v.is_ascii() {
bytes.push(cx.expr_u8(expr.span, v as u8));
} else {
cx.span_err(expr.span, "non-ascii char literal in bytes!");
err = true;
}
}
_ => {
cx.span_err(expr.span, "unsupported literal in bytes!");
err = true;
}
},
_ => {
cx.span_err(expr.span, "non-literal in bytes!");
err = true;
}
}
}
// For some reason using quote_expr!() here aborts if we threw an error.
// I'm assuming that the end of the recursive parse tricks the compiler
// into thinking this is a good time to stop. But we'd rather keep going.
if err {
// Since the compiler will stop after the macro expansion phase anyway, we
// don't need type info, so we can just return a DummyResult
return DummyResult::expr(sp);
}
let len = bytes.len();
let e = cx.expr_vec(sp, bytes);
let ty = cx.ty(sp, ast::TyFixedLengthVec(cx.ty_ident(sp, cx.ident_of("u8")),
cx.expr_uint(sp, len)));
let item = cx.item_static(sp, cx.ident_of("BYTES"), ty, ast::MutImmutable, e);
let ret = cx.expr_ident(sp, cx.ident_of("BYTES"));
let ret = cx.expr_addr_of(sp, ret);
let e = cx.expr_block(cx.block(sp, vec![cx.stmt_item(sp, item)],
Some(ret)));
MacExpr::new(e)
}
| {
cx.span_warn(sp, "`bytes!` is deprecated, use `b\"foo\"` literals instead");
cx.parse_sess.span_diagnostic.span_note(sp,
"see http://doc.rust-lang.org/reference.html#byte-and-byte-string-literals \
for documentation");
cx.parse_sess.span_diagnostic.span_note(sp,
"see https://github.com/rust-lang/rust/blob/master/src/etc/2014-06-rewrite-bytes-macros.py \
for an automated migration");
// Gather all argument expressions
let exprs = match get_exprs_from_tts(cx, sp, tts) {
None => return DummyResult::expr(sp),
Some(e) => e,
};
let mut bytes = Vec::new();
let mut err = false;
for expr in exprs.iter() {
match expr.node {
// expression is a literal | identifier_body |
bytes.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* The compiler code necessary to support the bytes! extension. */
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
pub fn | <'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
cx.span_warn(sp, "`bytes!` is deprecated, use `b\"foo\"` literals instead");
cx.parse_sess.span_diagnostic.span_note(sp,
"see http://doc.rust-lang.org/reference.html#byte-and-byte-string-literals \
for documentation");
cx.parse_sess.span_diagnostic.span_note(sp,
"see https://github.com/rust-lang/rust/blob/master/src/etc/2014-06-rewrite-bytes-macros.py \
for an automated migration");
// Gather all argument expressions
let exprs = match get_exprs_from_tts(cx, sp, tts) {
None => return DummyResult::expr(sp),
Some(e) => e,
};
let mut bytes = Vec::new();
let mut err = false;
for expr in exprs.iter() {
match expr.node {
// expression is a literal
ast::ExprLit(ref lit) => match lit.node {
// string literal, push each byte to vector expression
ast::LitStr(ref s, _) => {
for byte in s.get().bytes() {
bytes.push(cx.expr_u8(expr.span, byte));
}
}
// u8 literal, push to vector expression
ast::LitInt(v, ast::UnsignedIntLit(ast::TyU8)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large u8 literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// integer literal, push to vector expression
ast::LitInt(_, ast::UnsuffixedIntLit(ast::Minus)) => {
cx.span_err(expr.span, "negative integer literal in bytes!");
err = true;
}
ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large integer literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// char literal, push to vector expression
ast::LitChar(v) => {
if v.is_ascii() {
bytes.push(cx.expr_u8(expr.span, v as u8));
} else {
cx.span_err(expr.span, "non-ascii char literal in bytes!");
err = true;
}
}
_ => {
cx.span_err(expr.span, "unsupported literal in bytes!");
err = true;
}
},
_ => {
cx.span_err(expr.span, "non-literal in bytes!");
err = true;
}
}
}
// For some reason using quote_expr!() here aborts if we threw an error.
// I'm assuming that the end of the recursive parse tricks the compiler
// into thinking this is a good time to stop. But we'd rather keep going.
if err {
// Since the compiler will stop after the macro expansion phase anyway, we
// don't need type info, so we can just return a DummyResult
return DummyResult::expr(sp);
}
let len = bytes.len();
let e = cx.expr_vec(sp, bytes);
let ty = cx.ty(sp, ast::TyFixedLengthVec(cx.ty_ident(sp, cx.ident_of("u8")),
cx.expr_uint(sp, len)));
let item = cx.item_static(sp, cx.ident_of("BYTES"), ty, ast::MutImmutable, e);
let ret = cx.expr_ident(sp, cx.ident_of("BYTES"));
let ret = cx.expr_addr_of(sp, ret);
let e = cx.expr_block(cx.block(sp, vec![cx.stmt_item(sp, item)],
Some(ret)));
MacExpr::new(e)
}
| expand_syntax_ext | identifier_name |
bytes.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* The compiler code necessary to support the bytes! extension. */
use ast;
use codemap::Span;
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
pub fn expand_syntax_ext<'cx>(cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[ast::TokenTree])
-> Box<base::MacResult+'cx> {
cx.span_warn(sp, "`bytes!` is deprecated, use `b\"foo\"` literals instead");
cx.parse_sess.span_diagnostic.span_note(sp,
"see http://doc.rust-lang.org/reference.html#byte-and-byte-string-literals \
for documentation");
cx.parse_sess.span_diagnostic.span_note(sp,
"see https://github.com/rust-lang/rust/blob/master/src/etc/2014-06-rewrite-bytes-macros.py \
for an automated migration");
// Gather all argument expressions
let exprs = match get_exprs_from_tts(cx, sp, tts) {
None => return DummyResult::expr(sp),
Some(e) => e,
};
let mut bytes = Vec::new();
let mut err = false;
for expr in exprs.iter() {
match expr.node {
// expression is a literal
ast::ExprLit(ref lit) => match lit.node {
// string literal, push each byte to vector expression
ast::LitStr(ref s, _) => {
for byte in s.get().bytes() {
bytes.push(cx.expr_u8(expr.span, byte));
}
}
// u8 literal, push to vector expression
ast::LitInt(v, ast::UnsignedIntLit(ast::TyU8)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large u8 literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// integer literal, push to vector expression | cx.span_err(expr.span, "negative integer literal in bytes!");
err = true;
}
ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => {
if v > 0xFF {
cx.span_err(expr.span, "too large integer literal in bytes!");
err = true;
} else {
bytes.push(cx.expr_u8(expr.span, v as u8));
}
}
// char literal, push to vector expression
ast::LitChar(v) => {
if v.is_ascii() {
bytes.push(cx.expr_u8(expr.span, v as u8));
} else {
cx.span_err(expr.span, "non-ascii char literal in bytes!");
err = true;
}
}
_ => {
cx.span_err(expr.span, "unsupported literal in bytes!");
err = true;
}
},
_ => {
cx.span_err(expr.span, "non-literal in bytes!");
err = true;
}
}
}
// For some reason using quote_expr!() here aborts if we threw an error.
// I'm assuming that the end of the recursive parse tricks the compiler
// into thinking this is a good time to stop. But we'd rather keep going.
if err {
// Since the compiler will stop after the macro expansion phase anyway, we
// don't need type info, so we can just return a DummyResult
return DummyResult::expr(sp);
}
let len = bytes.len();
let e = cx.expr_vec(sp, bytes);
let ty = cx.ty(sp, ast::TyFixedLengthVec(cx.ty_ident(sp, cx.ident_of("u8")),
cx.expr_uint(sp, len)));
let item = cx.item_static(sp, cx.ident_of("BYTES"), ty, ast::MutImmutable, e);
let ret = cx.expr_ident(sp, cx.ident_of("BYTES"));
let ret = cx.expr_addr_of(sp, ret);
let e = cx.expr_block(cx.block(sp, vec![cx.stmt_item(sp, item)],
Some(ret)));
MacExpr::new(e)
} | ast::LitInt(_, ast::UnsuffixedIntLit(ast::Minus)) => { | random_line_split |
lib.rs | //! [](https://travis-ci.org/lawliet89/rowdy)
//! [](https://dependencyci.com/github/lawliet89/rowdy)
//! [](https://crates.io/crates/rowdy)
//! [](https://github.com/lawliet89/rowdy)
//! [](https://docs.rs/rowdy)
//!
//! Documentation: [Stable](https://docs.rs/rowdy) | [Master](https://lawliet89.github.io/rowdy/)
//!
//! `rowdy` is a [Rocket](https://rocket.rs/) based JSON Web token based authentication server
//! based off Docker Registry's
//! [authentication protocol](https://docs.docker.com/registry/spec/auth/).
//!
//! # Features
//!
//! - `simple_authenticator`: A simple CSV based authenticator
//! - `ldap_authenticator`: An LDAP based authenticator
//!
//! By default, the `simple_authenticator` feature is turned on.
//!
//! # `rowdy` Authentication Flow
//!
//! The authentication flow is inspired by
//! [Docker Registry](https://docs.docker.com/registry/spec/auth/) authentication specification.
//!
//! ## JSON Web Tokens
//!
//! Authentication makes use of two types of [JSON Web Tokens (JWT)](https://jwt.io/):
//! Access and Refresh tokens.
//!
//! ### Access Token
//!
//! The access token is a short lived JWT that allows users to access resources within the scope
//! that they are allowed to. The access token itself contains enough information for services
//! to verify the user and their permissions in a stateless manner.
//!
//! ### Refresh Token
//!
//! The refresh token allows users to retrieve a new access token without needing to
//! re-authenticate. As such, the refresh token is longer lived, but can be revoked.
//!
//! ## Authentication Flow
//!
//! 1. Client attempts to access a resource on a protected service.
//! 1. Service responds with a `401 Unauthorized` authentication challenge with information on
//! how to authenticate
//! provided in the `WWW-Authenticate` response header.
//! 1. Using the information from the previous step, the client authenticates with the
//! authentication server. The client
//! will receive, among other information, opaque access and refresh tokens.
//! 1. The client retries the original request with the Bearer token embedded in the request’s
//! Authorization header.
//! 1. The service authorizes the client by validating the Bearer token and the claim set
//! embedded within it and
//! proceeds as usual.
//!
//! ### Authentication Challenge
//!
//! Services will challenge users who do not provide a valid token via the HTTP response
//! `401 Unauthorized`. Details for
//! authentication is provided in the `WWW-Authenticate` header.
//!
//! ```text
//! Www-Authenticate: Bearer realm="https://www.auth.com",service="https://www.example.com",scope="all"
//! ```
//!
//! The `realm` field indicates the authentcation server endpoint which clients should proceed to
//! authenticate against.
//!
//! The `service` field indicates the `service` value that clients should use when attempting to
//! authenticate at `realm`.
//!
//! The `scope` field indicates the `scope` value that clients should use when attempting to
//! authenticate at `realm`.
//!
//! ### Retrieving an Access Token (and optionally Refresh Token) from the Authentication Server
//!
//! A HTTP `GET` request should be made to the `realm` endpoint provided above. The endpoint will
//! support the following uery paremeters:
//!
//! - `service`: The service that the client is authenticating for. This should be the same as
//! the `service` value in the previous step
//! - `scope`: The scope that the client wishes to authenticate for.
//! This should be the same as the `scope` value in the previous step.
//! - `offline_token`: Set to `true` if a refresh token is also required. Defaults to `false`.
//! Cannot be set to `true` when using a refresh token to retrieve a new access token.
//!
//! When authenticating for the first time, clients should send the user's username and passwords
//! in the form of `Basic` authentication. If the client already has a prior refresh token and
//! would like to obtain a new access token, the client should send the refresh token in the form
//! of `Bearer` authentication.
//!
//! If successful, the authentcation server will return a `200 OK` response with a
//! JSON body containing the following fields:
//!
//! - `token`: An opaque Access (`Bearer`) token that clients should supply to subsequent requests
//! in the `Authorization` header.
//! - `expires_in`: The duration in seconds since the token was issued that it will remain valid.
//! - `issued_at`: RFC3339-serialized UTC standard time at which a given token was issued.
//! - `refresh_token`: An opaque `Refresh` token which can be used to get additional access
//! tokens for the same subject with different scopes. This token should be kept secure by
//! the client and only sent to the authorization server which issues access tokens.
//! This field will only be set when `offline_token=true` is provided in the request.
//!
//! If this fails, the server will return with the appropriate `4xx` response.
//!
//! ### Using the Access Token
//!
//! Once the client has a token, it will try the request again with the token placed in the
//! HTTP Authorization header like so:
//!
//! ```text
//! Authorization: Bearer <token>
//! ```
//!
//! ### Using the Refresh Token to Retrieve a New Access Token
//!
//! When the client's Access token expires, and it has previously asked for a Refresh Token,
//! the client can make a `GET` request to the same endpoint that the client used to retrieve the
//! access token (the `realm` URL in an authentication challenge).
//!
//! The steps are described in the section "Retrieving an Access Token" above. The process is the
//! same as the initial authentication except that instead of using `Basic` authentication,
//! the client should instead send the refresh token retrieved prior as `Bearer` authentication.
//! Also, `offline_token` cannot be requested for when requesting for a new access token using a
//! refresh token. (HTTP 401 will be returned if this happens.)
//!
//! ### Example
//!
//! This example uses `curl` to make request to the some (hypothetical) protected endpoint.
//! It requires [`jq`](https://stedolan.github.io/jq/) to parse JSON.
//!
//! ```bash
//! PROTECTED_RESOURCE="https://www.example.com/protected/resource/"
//!
//! # Save the response headers of our first request to the endpoint to get the Www-Authenticate
//! # header
//! RESPONSE_HEADER=$(tempfile);
//! curl --dump-header "${RESPONSE_HEADER}" "${PROTECTED_RESOURCE}"
//!
//! # Extract the realm, the service, and the scope from the Www-Authenticate header
//! WWWAUTH=$(cat "${RESPONSE_HEADER}" | grep "Www-Authenticate")
//! REALM=$(echo "${WWWAUTH}" | grep -o '\(realm\)="[^"]*"' | cut -d '"' -f 2)
//! SERVICE=$(echo "${WWWAUTH}" | grep -o '\(service\)="[^"]*"' | cut -d '"' -f 2)
//! SCOPE=$(echo "${WWWAUTH}" | grep -o '\(scope\)="[^"]*"' | cut -d '"' -f 2)
//!
//! # Build the URL to query the auth server
//! AUTH_URL="${REALM}?service=${SERVICE}&scope=${SCOPE}&offline_token=true"
//!
//! # Query the auth server to get a token -- replace the username and password
//! # below with the value from 1password
//! TOKEN=$(curl -s --user "mozart:password" "${AUTH_URL}")
//!
//! # Get the access token from the JSON string: {"token": "...."}
//! ACCESS_TOKEN=$(echo ${TOKEN} | jq.token | tr -d '"')
//!
//! # Query the resource again, but this time with a bearer token
//! curl -v -H "Authorization: Bearer ${ACCESS_TOKEN}" "${PROTECTED_RESOURCE}"
//!
//! # Get the refresh token
//! REFRESH_TOKEN=$(echo "${TOKEN}" | jq.refresh_token | tr -d '"')
//!
//! # Get a new access token
//! NEW_TOKEN=$(curl --header "Authorization: Bearer ${REFRESH_TOKEN}" "${AUTH_URL}")
//!
//! # Parse the new access token
//! NEW_ACCESS_TOKEN=$(echo "${TOKEN}" | jq.token | tr -d '"')
//!
//! # Query the resource again, but this time with a new access token
//! curl -v -H "Authorization: Bearer ${NEW_ACCESS_TOKEN}" "${PROTECTED_RESOURCE}"
//! ```
//!
//! ## Scope
//!
//! Not in use at the moment. Just use `all`.
//!
#![feature(proc_macro_hygiene, decl_macro)]
// See https://github.com/rust-unofficial/patterns/blob/master/anti_patterns/deny-warnings.md
#![allow(
legacy_directory_ownership,
missing_copy_implementations,
missing_debug_implementations,
unknown_lints,
unsafe_code,
intra_doc_link_resolution_failure
)]
#![deny(
const_err,
dead_code,
deprecated,
exceeding_bitshifts,
improper_ctypes,
missing_docs,
mutable_transmutes,
no_mangle_const_items,
non_camel_case_types,
non_shorthand_field_patterns,
non_upper_case_globals,
overflowing_literals,
path_statements,
plugin_as_library,
stable_features,
trivial_casts,
trivial_numeric_casts,
unconditional_recursion,
unknown_crate_types,
unreachable_code,
unused_allocation,
unused_assignments,
unused_attributes,
unused_comparisons,
unused_extern_crates,
unused_features,
unused_imports,
unused_import_braces,
unused_qualifications,
unused_must_use,
unused_mut,
unused_parens,
unused_results,
unused_unsafe,
unused_variables,
variant_size_differences,
warnings,
while_true
)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
use biscuit as jwt;
use hyper;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate rocket;
// we are using the "log_!" macros which are redefined from `log`'s
use rocket_cors as cors;
#[macro_use]
extern crate serde_derive;
use serde_json;
#[cfg(test)]
extern crate serde_test;
#[macro_use]
mod macros;
#[cfg(test)]
#[macro_use]
mod test;
pub mod auth;
mod routes;
pub mod serde_custom;
pub mod token;
pub use self::routes::routes;
use std::error;
use std::fmt;
use std::io;
use std::ops::Deref;
use std::str::FromStr;
use ring::rand::SystemRandom;
use rocket::http::Status;
use rocket::response::{Responder, Response};
use rocket::Request;
use serde::de;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub use serde_json::Map as JsonMap;
pub use serde_json::Value as JsonValue;
/// Top level error enum
#[derive(Debug)]
pub enum Error {
/// A generic/unknown error
GenericError(String),
/// A bad request resulting from bad request parameters/headers
BadRequest(String),
/// Authentication error
Auth(auth::Error),
/// CORS error
CORS(cors::Error),
/// Token Error
Token(token::Error),
/// IO errors
IOError(io::Error),
/// An error launcing Rocket
LaunchError(rocket::error::LaunchError),
/// Unsupported operation
UnsupportedOperation,
}
impl_from_error!(auth::Error, Error::Auth);
impl_from_error!(cors::Error, Error::CORS);
impl_from_error!(token::Error, Error::Token);
impl_from_error!(String, Error::GenericError);
impl_from_error!(io::Error, Error::IOError);
impl_from_error!(rocket::error::LaunchError, Error::LaunchError);
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::UnsupportedOperation => "This operation is not supported",
Error::Auth(ref e) => e.description(),
Error::CORS(ref e) => e.description(),
Error::Token(ref e) => e.description(),
Error::IOError(ref e) => e.description(),
Error::LaunchError(ref e) => e.description(),
Error::GenericError(ref e) | Error::BadRequest(ref e) => e,
}
}
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
Error::Auth(ref e) => Some(e),
Error::CORS(ref e) => Some(e),
Error::Token(ref e) => Some(e),
Error::IOError(ref e) => Some(e),
Error::LaunchError(ref e) => Some(e),
Error::UnsupportedOperation | Error::GenericError(_) | Error::BadRequest(_) => {
Some(self)
}
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnsupportedOperation => write!(f, "{}", error::Error::description(self)),
Error::Auth(ref e) => fmt::Display::fmt(e, f),
Error::CORS(ref e) => fmt::Display::fmt(e, f),
Error::Token(ref e) => fmt::Display::fmt(e, f),
Error::IOError(ref e) => fmt::Display::fmt(e, f),
Error::GenericError(ref e) => fmt::Display::fmt(e, f),
Error::LaunchError(ref e) => fmt::Display::fmt(e, f),
Error::BadRequest(ref e) => fmt::Display::fmt(e, f),
}
}
}
impl<'r> Responder<'r> for Error {
fn respond_to(self, request: &Request<'_>) -> Result<Response<'r>, Status> {
match self {
Error::Auth(e) => e.respond_to(request),
Error::CORS(e) => e.respond_to(request),
Error::Token(e) => e.respond_to(request),
Error::BadRequest(e) => {
error_!("{}", e);
Err(Status::BadRequest)
}
e => {
error_!("{}", e);
Err(Status::InternalServerError)
}
}
}
}
/// Wrapper around `hyper::Url` with `Serialize` and `Deserialize` implemented
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
pub struct Url(hyper::Url);
impl_deref!(Url, hyper::Url);
impl FromStr for Url {
type Err = hyper::error::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Url(hyper::Url::from_str(s)?))
}
}
impl fmt::Display for Url {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0.as_str())
}
}
impl Serialize for Url {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.0.as_str())
}
}
impl<'de> Deserialize<'de> for Url {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct UrlVisitor;
impl<'de> de::Visitor<'de> for UrlVisitor {
type Value = Url;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a valid URL string")
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Url(
hyper::Url::from_str(&value).map_err(|e| E::custom(e.to_string()))?
))
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Url(
hyper::Url::from_str(value).map_err(|e| E::custom(e.to_string()))?
))
}
}
deserializer.deserialize_string(UrlVisitor)
}
}
/// A sequence of bytes, either as an array of unsigned 8 bit integers, or a string which will be
/// treated as UTF-8.
/// This enum is (de)serialized [`untagged`](https://serde.rs/enum-representations.html).
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum ByteSequence {
/// A string which will be converted to UTF-8 and then to bytes.
String(String),
/// A sequence of unsigned 8 bits integers which will be treated as bytes.
Bytes(Vec<u8>),
}
impl ByteSequence {
/// Returns the byte sequence.
pub fn as_bytes(&self) -> Vec<u8> {
match *self {
ByteSequence::String(ref string) => string.to_string().into_bytes(),
ByteSequence::Bytes(ref bytes) => bytes.to_vec(),
}
}
}
/// Application configuration. Usually deserialized from JSON for use.
///
/// The type parameter `B` is the [`auth::AuthenticatorConfiguration`] and by its associated
/// type, the `Authenticator` that is going to be used for HTTP Basic Authentication.
///
/// # Examples
/// ```
/// extern crate rowdy;
/// extern crate serde_json;
///
/// use rowdy::Configuration;
/// use rowdy::auth::NoOpConfiguration;
///
/// # fn main() {
/// // We are using the `NoOp` authenticator
/// let json = r#"{
/// "token" : {
/// "issuer": "https://www.acme.com",
/// "allowed_origins": { "Some": ["https://www.example.com", "https://www.foobar.com"] },
/// "audience": ["https://www.example.com", "https://www.foobar.com"],
/// "signature_algorithm": "RS256",
/// "secret": {
/// "rsa_private": "test/fixtures/rsa_private_key.der",
/// "rsa_public": "test/fixtures/rsa_public_key.der"
/// },
/// "expiry_duration": 86400
/// },
/// "basic_authenticator": {}
/// }"#;
/// let config: Configuration<NoOpConfiguration> = serde_json::from_str(json).unwrap();
/// let rocket = config.ignite().unwrap().mount("/", rowdy::routes());
/// // then `rocket.launch()`!
/// # }
/// ```
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Configuration<B> {
/// Token configuration. See the type documentation for deserialization examples
pub token: token::Configuration,
/// The configuration for the authenticator that will handle HTTP Basic Authentication.
pub basic_authenticator: B,
}
impl<B: auth::AuthenticatorConfiguration<auth::Basic>> Configuration<B> {
/// Ignites the rocket with various configuration objects, but does not mount any routes.
/// Remember to mount routes and call `launch` on the returned Rocket object.
/// See the struct documentation for an example.
pub fn ignite(&self) -> Result<rocket::Rocket, Error> {
let token_getter_cors_options = self.token.cors_option();
let basic_authenticator = self.basic_authenticator.make_authenticator()?;
let basic_authenticator: Box<auth::BasicAuthenticator> = Box::new(basic_authenticator);
// Prepare the keys
let keys = self.token.keys()?;
Ok(rocket::ignite()
.manage(self.token.clone())
.manage(basic_authenticator)
.manage(keys)
.attach(token_getter_cors_options))
}
}
/// Convenience function to ignite and launch rowdy. This function will never return
///
/// # Panics
/// Panics if during the Rocket igition, something goes wrong.
///
/// # Example
/// ```rust,no_run
/// extern crate rowdy;
/// extern crate serde_json;
///
/// use rowdy::Configuration;
/// use rowdy::auth::NoOpConfiguration;
///
/// # fn main() {
/// // We are using the `NoOp` authenticator
/// let json = r#"{
/// "token" : {
/// "issuer": "https://www.acme.com",
/// "allowed_origins": ["https://www.example.com", "https://www.foobar.com"],
/// "audience": ["https://www.example.com", "https://www.foobar.com"],
/// "signature_algorithm": "RS256",
/// "secret": {
/// "rsa_private": "test/fixtures/rsa_private_key.der",
/// "rsa_public": "test/fixtures/rsa_public_key.der"
/// },
/// "expiry_duration": 86400
/// },
/// "basic_authenticator": {}
/// }"#;
/// let config: Configuration<NoOpConfiguration> = serde_json::from_str(json).unwrap();
///
/// rowdy::launch(config);
/// # }
/// ```
pub fn launch<B: auth::AuthenticatorConfiguration<auth::Basic>>(
config: Configuration<B>,
) -> rocket::error::LaunchError {
let rocket = config.ignite().unwrap_or_else(|e| panic!("{}", e));
rocket.mount("/", routes()).launch()
}
/// Return a psuedo random number generator
pub(crate) fn rng() -> &'static SystemRandom {
use std::ops::Deref;
lazy_static! {
static ref RANDOM: SystemRandom = SystemRandom::new();
}
RANDOM.deref()
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use serde_test::{assert_tokens, Token};
use super::*;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
struct TestUrl {
url: Url,
}
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct TestClaims {
company: String,
department: String,
}
impl Default for TestClaims {
fn default() -> Self {
TestClaims {
company: "ACME".to_string(),
department: "Toilet Cleaning".to_string(),
}
}
}
#[test]
fn ur | {
let test = TestUrl {
url: not_err!(Url::from_str("https://www.example.com/")),
};
assert_tokens(
&test,
&[
Token::Struct {
name: "TestUrl",
len: 1,
},
Token::Str("url"),
Token::Str("https://www.example.com/"),
Token::StructEnd,
],
);
}
}
| l_serialization_token_round_trip() | identifier_name |
lib.rs | //! [](https://travis-ci.org/lawliet89/rowdy)
//! [](https://dependencyci.com/github/lawliet89/rowdy)
//! [](https://crates.io/crates/rowdy)
//! [](https://github.com/lawliet89/rowdy)
//! [](https://docs.rs/rowdy)
//!
//! Documentation: [Stable](https://docs.rs/rowdy) | [Master](https://lawliet89.github.io/rowdy/)
//!
//! `rowdy` is a [Rocket](https://rocket.rs/) based JSON Web token based authentication server
//! based off Docker Registry's
//! [authentication protocol](https://docs.docker.com/registry/spec/auth/).
//!
//! # Features
//!
//! - `simple_authenticator`: A simple CSV based authenticator
//! - `ldap_authenticator`: An LDAP based authenticator
//!
//! By default, the `simple_authenticator` feature is turned on.
//!
//! # `rowdy` Authentication Flow
//!
//! The authentication flow is inspired by
//! [Docker Registry](https://docs.docker.com/registry/spec/auth/) authentication specification.
//!
//! ## JSON Web Tokens
//!
//! Authentication makes use of two types of [JSON Web Tokens (JWT)](https://jwt.io/):
//! Access and Refresh tokens.
//!
//! ### Access Token
//!
//! The access token is a short lived JWT that allows users to access resources within the scope
//! that they are allowed to. The access token itself contains enough information for services
//! to verify the user and their permissions in a stateless manner.
//!
//! ### Refresh Token
//!
//! The refresh token allows users to retrieve a new access token without needing to
//! re-authenticate. As such, the refresh token is longer lived, but can be revoked.
//!
//! ## Authentication Flow
//!
//! 1. Client attempts to access a resource on a protected service.
//! 1. Service responds with a `401 Unauthorized` authentication challenge with information on
//! how to authenticate
//! provided in the `WWW-Authenticate` response header.
//! 1. Using the information from the previous step, the client authenticates with the
//! authentication server. The client
//! will receive, among other information, opaque access and refresh tokens.
//! 1. The client retries the original request with the Bearer token embedded in the request’s
//! Authorization header.
//! 1. The service authorizes the client by validating the Bearer token and the claim set
//! embedded within it and
//! proceeds as usual.
//!
//! ### Authentication Challenge
//!
//! Services will challenge users who do not provide a valid token via the HTTP response
//! `401 Unauthorized`. Details for
//! authentication is provided in the `WWW-Authenticate` header.
//!
//! ```text
//! Www-Authenticate: Bearer realm="https://www.auth.com",service="https://www.example.com",scope="all"
//! ```
//!
//! The `realm` field indicates the authentcation server endpoint which clients should proceed to
//! authenticate against.
//!
//! The `service` field indicates the `service` value that clients should use when attempting to
//! authenticate at `realm`.
//!
//! The `scope` field indicates the `scope` value that clients should use when attempting to
//! authenticate at `realm`.
//!
//! ### Retrieving an Access Token (and optionally Refresh Token) from the Authentication Server
//!
//! A HTTP `GET` request should be made to the `realm` endpoint provided above. The endpoint will
//! support the following uery paremeters:
//!
//! - `service`: The service that the client is authenticating for. This should be the same as
//! the `service` value in the previous step
//! - `scope`: The scope that the client wishes to authenticate for.
//! This should be the same as the `scope` value in the previous step.
//! - `offline_token`: Set to `true` if a refresh token is also required. Defaults to `false`.
//! Cannot be set to `true` when using a refresh token to retrieve a new access token.
//!
//! When authenticating for the first time, clients should send the user's username and passwords
//! in the form of `Basic` authentication. If the client already has a prior refresh token and
//! would like to obtain a new access token, the client should send the refresh token in the form
//! of `Bearer` authentication.
//!
//! If successful, the authentcation server will return a `200 OK` response with a
//! JSON body containing the following fields:
//!
//! - `token`: An opaque Access (`Bearer`) token that clients should supply to subsequent requests
//! in the `Authorization` header.
//! - `expires_in`: The duration in seconds since the token was issued that it will remain valid.
//! - `issued_at`: RFC3339-serialized UTC standard time at which a given token was issued.
//! - `refresh_token`: An opaque `Refresh` token which can be used to get additional access
//! tokens for the same subject with different scopes. This token should be kept secure by
//! the client and only sent to the authorization server which issues access tokens.
//! This field will only be set when `offline_token=true` is provided in the request.
//!
//! If this fails, the server will return with the appropriate `4xx` response.
//!
//! ### Using the Access Token
//!
//! Once the client has a token, it will try the request again with the token placed in the
//! HTTP Authorization header like so:
//!
//! ```text
//! Authorization: Bearer <token>
//! ```
//!
//! ### Using the Refresh Token to Retrieve a New Access Token
//!
//! When the client's Access token expires, and it has previously asked for a Refresh Token,
//! the client can make a `GET` request to the same endpoint that the client used to retrieve the
//! access token (the `realm` URL in an authentication challenge).
//!
//! The steps are described in the section "Retrieving an Access Token" above. The process is the
//! same as the initial authentication except that instead of using `Basic` authentication,
//! the client should instead send the refresh token retrieved prior as `Bearer` authentication.
//! Also, `offline_token` cannot be requested for when requesting for a new access token using a
//! refresh token. (HTTP 401 will be returned if this happens.)
//!
//! ### Example
//!
//! This example uses `curl` to make request to the some (hypothetical) protected endpoint.
//! It requires [`jq`](https://stedolan.github.io/jq/) to parse JSON.
//!
//! ```bash
//! PROTECTED_RESOURCE="https://www.example.com/protected/resource/"
//!
//! # Save the response headers of our first request to the endpoint to get the Www-Authenticate
//! # header
//! RESPONSE_HEADER=$(tempfile);
//! curl --dump-header "${RESPONSE_HEADER}" "${PROTECTED_RESOURCE}"
//!
//! # Extract the realm, the service, and the scope from the Www-Authenticate header
//! WWWAUTH=$(cat "${RESPONSE_HEADER}" | grep "Www-Authenticate")
//! REALM=$(echo "${WWWAUTH}" | grep -o '\(realm\)="[^"]*"' | cut -d '"' -f 2)
//! SERVICE=$(echo "${WWWAUTH}" | grep -o '\(service\)="[^"]*"' | cut -d '"' -f 2)
//! SCOPE=$(echo "${WWWAUTH}" | grep -o '\(scope\)="[^"]*"' | cut -d '"' -f 2)
//!
//! # Build the URL to query the auth server
//! AUTH_URL="${REALM}?service=${SERVICE}&scope=${SCOPE}&offline_token=true"
//!
//! # Query the auth server to get a token -- replace the username and password
//! # below with the value from 1password
//! TOKEN=$(curl -s --user "mozart:password" "${AUTH_URL}")
//!
//! # Get the access token from the JSON string: {"token": "...."}
//! ACCESS_TOKEN=$(echo ${TOKEN} | jq.token | tr -d '"')
//!
//! # Query the resource again, but this time with a bearer token
//! curl -v -H "Authorization: Bearer ${ACCESS_TOKEN}" "${PROTECTED_RESOURCE}"
//!
//! # Get the refresh token
//! REFRESH_TOKEN=$(echo "${TOKEN}" | jq.refresh_token | tr -d '"')
//!
//! # Get a new access token
//! NEW_TOKEN=$(curl --header "Authorization: Bearer ${REFRESH_TOKEN}" "${AUTH_URL}")
//!
//! # Parse the new access token
//! NEW_ACCESS_TOKEN=$(echo "${TOKEN}" | jq.token | tr -d '"')
//!
//! # Query the resource again, but this time with a new access token
//! curl -v -H "Authorization: Bearer ${NEW_ACCESS_TOKEN}" "${PROTECTED_RESOURCE}"
//! ```
//!
//! ## Scope
//!
//! Not in use at the moment. Just use `all`.
//!
#![feature(proc_macro_hygiene, decl_macro)]
// See https://github.com/rust-unofficial/patterns/blob/master/anti_patterns/deny-warnings.md
#![allow(
legacy_directory_ownership,
missing_copy_implementations,
missing_debug_implementations,
unknown_lints,
unsafe_code,
intra_doc_link_resolution_failure
)]
#![deny(
const_err,
dead_code,
deprecated,
exceeding_bitshifts,
improper_ctypes,
missing_docs,
mutable_transmutes,
no_mangle_const_items,
non_camel_case_types,
non_shorthand_field_patterns,
non_upper_case_globals,
overflowing_literals,
path_statements,
plugin_as_library,
stable_features,
trivial_casts,
trivial_numeric_casts,
unconditional_recursion,
unknown_crate_types,
unreachable_code,
unused_allocation,
unused_assignments,
unused_attributes,
unused_comparisons,
unused_extern_crates,
unused_features,
unused_imports,
unused_import_braces,
unused_qualifications,
unused_must_use,
unused_mut,
unused_parens,
unused_results,
unused_unsafe,
unused_variables,
variant_size_differences,
warnings,
while_true
)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
use biscuit as jwt;
use hyper;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate rocket;
// we are using the "log_!" macros which are redefined from `log`'s
use rocket_cors as cors;
#[macro_use]
extern crate serde_derive;
use serde_json;
#[cfg(test)]
extern crate serde_test;
#[macro_use]
mod macros;
#[cfg(test)]
#[macro_use]
mod test;
pub mod auth;
mod routes;
pub mod serde_custom;
pub mod token;
pub use self::routes::routes;
use std::error;
use std::fmt;
use std::io;
use std::ops::Deref;
use std::str::FromStr;
use ring::rand::SystemRandom;
use rocket::http::Status;
use rocket::response::{Responder, Response};
use rocket::Request;
use serde::de;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub use serde_json::Map as JsonMap;
pub use serde_json::Value as JsonValue;
/// Top level error enum
#[derive(Debug)]
pub enum Error {
/// A generic/unknown error
GenericError(String),
/// A bad request resulting from bad request parameters/headers
BadRequest(String),
/// Authentication error
Auth(auth::Error),
/// CORS error
CORS(cors::Error),
/// Token Error
Token(token::Error),
/// IO errors
IOError(io::Error),
/// An error launcing Rocket
LaunchError(rocket::error::LaunchError),
/// Unsupported operation
UnsupportedOperation,
}
impl_from_error!(auth::Error, Error::Auth);
impl_from_error!(cors::Error, Error::CORS);
impl_from_error!(token::Error, Error::Token);
impl_from_error!(String, Error::GenericError);
impl_from_error!(io::Error, Error::IOError);
impl_from_error!(rocket::error::LaunchError, Error::LaunchError);
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::UnsupportedOperation => "This operation is not supported",
Error::Auth(ref e) => e.description(),
Error::CORS(ref e) => e.description(),
Error::Token(ref e) => e.description(),
Error::IOError(ref e) => e.description(),
Error::LaunchError(ref e) => e.description(),
Error::GenericError(ref e) | Error::BadRequest(ref e) => e,
}
}
fn cause(&self) -> Option<&dyn error::Error> {
match *self {
Error::Auth(ref e) => Some(e),
Error::CORS(ref e) => Some(e),
Error::Token(ref e) => Some(e),
Error::IOError(ref e) => Some(e),
Error::LaunchError(ref e) => Some(e),
Error::UnsupportedOperation | Error::GenericError(_) | Error::BadRequest(_) => {
Some(self)
}
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::UnsupportedOperation => write!(f, "{}", error::Error::description(self)),
Error::Auth(ref e) => fmt::Display::fmt(e, f),
Error::CORS(ref e) => fmt::Display::fmt(e, f),
Error::Token(ref e) => fmt::Display::fmt(e, f),
Error::IOError(ref e) => fmt::Display::fmt(e, f),
Error::GenericError(ref e) => fmt::Display::fmt(e, f),
Error::LaunchError(ref e) => fmt::Display::fmt(e, f),
Error::BadRequest(ref e) => fmt::Display::fmt(e, f),
}
}
}
impl<'r> Responder<'r> for Error {
fn respond_to(self, request: &Request<'_>) -> Result<Response<'r>, Status> {
match self {
Error::Auth(e) => e.respond_to(request),
Error::CORS(e) => e.respond_to(request),
Error::Token(e) => e.respond_to(request),
Error::BadRequest(e) => {
error_!("{}", e);
Err(Status::BadRequest)
}
e => {
error_!("{}", e);
Err(Status::InternalServerError)
}
}
}
}
/// Wrapper around `hyper::Url` with `Serialize` and `Deserialize` implemented
#[derive(Clone, Eq, PartialEq, Hash, Debug)]
pub struct Url(hyper::Url);
impl_deref!(Url, hyper::Url);
impl FromStr for Url {
type Err = hyper::error::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Url(hyper::Url::from_str(s)?))
}
}
impl fmt::Display for Url {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0.as_str())
}
}
| serializer.serialize_str(self.0.as_str())
}
}
impl<'de> Deserialize<'de> for Url {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct UrlVisitor;
impl<'de> de::Visitor<'de> for UrlVisitor {
type Value = Url;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a valid URL string")
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Url(
hyper::Url::from_str(&value).map_err(|e| E::custom(e.to_string()))?
))
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(Url(
hyper::Url::from_str(value).map_err(|e| E::custom(e.to_string()))?
))
}
}
deserializer.deserialize_string(UrlVisitor)
}
}
/// A sequence of bytes, either as an array of unsigned 8 bit integers, or a string which will be
/// treated as UTF-8.
/// This enum is (de)serialized [`untagged`](https://serde.rs/enum-representations.html).
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum ByteSequence {
/// A string which will be converted to UTF-8 and then to bytes.
String(String),
/// A sequence of unsigned 8 bits integers which will be treated as bytes.
Bytes(Vec<u8>),
}
impl ByteSequence {
/// Returns the byte sequence.
pub fn as_bytes(&self) -> Vec<u8> {
match *self {
ByteSequence::String(ref string) => string.to_string().into_bytes(),
ByteSequence::Bytes(ref bytes) => bytes.to_vec(),
}
}
}
/// Application configuration. Usually deserialized from JSON for use.
///
/// The type parameter `B` is the [`auth::AuthenticatorConfiguration`] and by its associated
/// type, the `Authenticator` that is going to be used for HTTP Basic Authentication.
///
/// # Examples
/// ```
/// extern crate rowdy;
/// extern crate serde_json;
///
/// use rowdy::Configuration;
/// use rowdy::auth::NoOpConfiguration;
///
/// # fn main() {
/// // We are using the `NoOp` authenticator
/// let json = r#"{
/// "token" : {
/// "issuer": "https://www.acme.com",
/// "allowed_origins": { "Some": ["https://www.example.com", "https://www.foobar.com"] },
/// "audience": ["https://www.example.com", "https://www.foobar.com"],
/// "signature_algorithm": "RS256",
/// "secret": {
/// "rsa_private": "test/fixtures/rsa_private_key.der",
/// "rsa_public": "test/fixtures/rsa_public_key.der"
/// },
/// "expiry_duration": 86400
/// },
/// "basic_authenticator": {}
/// }"#;
/// let config: Configuration<NoOpConfiguration> = serde_json::from_str(json).unwrap();
/// let rocket = config.ignite().unwrap().mount("/", rowdy::routes());
/// // then `rocket.launch()`!
/// # }
/// ```
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Configuration<B> {
/// Token configuration. See the type documentation for deserialization examples
pub token: token::Configuration,
/// The configuration for the authenticator that will handle HTTP Basic Authentication.
pub basic_authenticator: B,
}
impl<B: auth::AuthenticatorConfiguration<auth::Basic>> Configuration<B> {
/// Ignites the rocket with various configuration objects, but does not mount any routes.
/// Remember to mount routes and call `launch` on the returned Rocket object.
/// See the struct documentation for an example.
pub fn ignite(&self) -> Result<rocket::Rocket, Error> {
let token_getter_cors_options = self.token.cors_option();
let basic_authenticator = self.basic_authenticator.make_authenticator()?;
let basic_authenticator: Box<auth::BasicAuthenticator> = Box::new(basic_authenticator);
// Prepare the keys
let keys = self.token.keys()?;
Ok(rocket::ignite()
.manage(self.token.clone())
.manage(basic_authenticator)
.manage(keys)
.attach(token_getter_cors_options))
}
}
/// Convenience function to ignite and launch rowdy. This function will never return
///
/// # Panics
/// Panics if during the Rocket igition, something goes wrong.
///
/// # Example
/// ```rust,no_run
/// extern crate rowdy;
/// extern crate serde_json;
///
/// use rowdy::Configuration;
/// use rowdy::auth::NoOpConfiguration;
///
/// # fn main() {
/// // We are using the `NoOp` authenticator
/// let json = r#"{
/// "token" : {
/// "issuer": "https://www.acme.com",
/// "allowed_origins": ["https://www.example.com", "https://www.foobar.com"],
/// "audience": ["https://www.example.com", "https://www.foobar.com"],
/// "signature_algorithm": "RS256",
/// "secret": {
/// "rsa_private": "test/fixtures/rsa_private_key.der",
/// "rsa_public": "test/fixtures/rsa_public_key.der"
/// },
/// "expiry_duration": 86400
/// },
/// "basic_authenticator": {}
/// }"#;
/// let config: Configuration<NoOpConfiguration> = serde_json::from_str(json).unwrap();
///
/// rowdy::launch(config);
/// # }
/// ```
pub fn launch<B: auth::AuthenticatorConfiguration<auth::Basic>>(
config: Configuration<B>,
) -> rocket::error::LaunchError {
let rocket = config.ignite().unwrap_or_else(|e| panic!("{}", e));
rocket.mount("/", routes()).launch()
}
/// Return a psuedo random number generator
pub(crate) fn rng() -> &'static SystemRandom {
use std::ops::Deref;
lazy_static! {
static ref RANDOM: SystemRandom = SystemRandom::new();
}
RANDOM.deref()
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use serde_test::{assert_tokens, Token};
use super::*;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
struct TestUrl {
url: Url,
}
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct TestClaims {
company: String,
department: String,
}
impl Default for TestClaims {
fn default() -> Self {
TestClaims {
company: "ACME".to_string(),
department: "Toilet Cleaning".to_string(),
}
}
}
#[test]
fn url_serialization_token_round_trip() {
let test = TestUrl {
url: not_err!(Url::from_str("https://www.example.com/")),
};
assert_tokens(
&test,
&[
Token::Struct {
name: "TestUrl",
len: 1,
},
Token::Str("url"),
Token::Str("https://www.example.com/"),
Token::StructEnd,
],
);
}
} | impl Serialize for Url {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{ | random_line_split |
window_update.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::vec::IntoIter;
// osmium
use super::CompressibleHttpFrame;
use super::FrameType;
use http2::error;
const WINDOW_SIZE_INCREMENT_BIT_MASK: u8 = 0x80;
#[derive(Debug)]
pub struct WindowUpdateFrameCompressModel {
window_size_increment: u32
}
impl WindowUpdateFrameCompressModel {
pub fn new(window_size_increment: u32) -> Self {
WindowUpdateFrameCompressModel {
window_size_increment: window_size_increment
}
}
}
impl CompressibleHttpFrame for WindowUpdateFrameCompressModel {
fn get_length(&self) -> i32 {
// 4 octets for the 32 bits in the window size increment
4
}
fn get_frame_type(&self) -> FrameType {
FrameType::WindowUpdate
}
fn get_flags(&self) -> u8 {
// this frame doesn't define any flags
0
}
fn get_payload(self: Box<Self>) -> Vec<u8> |
}
pub struct WindowUpdateFrame {
window_size_increment: u32
}
impl WindowUpdateFrame {
pub fn new_conn(frame_header: &super::FrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn new_stream(frame_header: &super::StreamFrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn get_window_size_increment(&self) -> u32 {
self.window_size_increment
}
}
| {
let mut result = Vec::new();
// include the window size increment
let window_size_increment_first_octet = (self.window_size_increment >> 24) as u8;
// TODO handle error
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push(window_size_increment_first_octet & !WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push((self.window_size_increment >> 16) as u8);
result.push((self.window_size_increment >> 8) as u8);
result.push(self.window_size_increment as u8);
result
} | identifier_body |
window_update.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::vec::IntoIter;
// osmium
use super::CompressibleHttpFrame;
use super::FrameType;
use http2::error;
const WINDOW_SIZE_INCREMENT_BIT_MASK: u8 = 0x80;
#[derive(Debug)]
pub struct WindowUpdateFrameCompressModel {
window_size_increment: u32
}
impl WindowUpdateFrameCompressModel {
pub fn new(window_size_increment: u32) -> Self {
WindowUpdateFrameCompressModel {
window_size_increment: window_size_increment
}
}
}
impl CompressibleHttpFrame for WindowUpdateFrameCompressModel {
fn get_length(&self) -> i32 {
// 4 octets for the 32 bits in the window size increment
4
}
fn get_frame_type(&self) -> FrameType {
FrameType::WindowUpdate
}
fn get_flags(&self) -> u8 {
// this frame doesn't define any flags
0
}
fn get_payload(self: Box<Self>) -> Vec<u8> {
let mut result = Vec::new();
// include the window size increment
let window_size_increment_first_octet = (self.window_size_increment >> 24) as u8;
// TODO handle error
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push(window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push((self.window_size_increment >> 16) as u8);
result.push((self.window_size_increment >> 8) as u8);
result.push(self.window_size_increment as u8);
result
}
}
pub struct WindowUpdateFrame {
window_size_increment: u32
}
impl WindowUpdateFrame {
pub fn new_conn(frame_header: &super::FrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn | (frame_header: &super::StreamFrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn get_window_size_increment(&self) -> u32 {
self.window_size_increment
}
}
| new_stream | identifier_name |
window_update.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::vec::IntoIter;
// osmium
use super::CompressibleHttpFrame;
use super::FrameType;
use http2::error;
const WINDOW_SIZE_INCREMENT_BIT_MASK: u8 = 0x80;
#[derive(Debug)]
pub struct WindowUpdateFrameCompressModel {
window_size_increment: u32
}
impl WindowUpdateFrameCompressModel {
pub fn new(window_size_increment: u32) -> Self {
WindowUpdateFrameCompressModel {
window_size_increment: window_size_increment
}
}
}
impl CompressibleHttpFrame for WindowUpdateFrameCompressModel {
fn get_length(&self) -> i32 {
// 4 octets for the 32 bits in the window size increment
4
}
fn get_frame_type(&self) -> FrameType {
FrameType::WindowUpdate
}
fn get_flags(&self) -> u8 {
// this frame doesn't define any flags
0
}
fn get_payload(self: Box<Self>) -> Vec<u8> {
let mut result = Vec::new();
// include the window size increment
let window_size_increment_first_octet = (self.window_size_increment >> 24) as u8;
// TODO handle error
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push(window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push((self.window_size_increment >> 16) as u8);
result.push((self.window_size_increment >> 8) as u8);
result.push(self.window_size_increment as u8);
result
}
}
pub struct WindowUpdateFrame {
window_size_increment: u32
}
impl WindowUpdateFrame {
pub fn new_conn(frame_header: &super::FrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn new_stream(frame_header: &super::StreamFrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 |
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn get_window_size_increment(&self) -> u32 {
self.window_size_increment
}
}
| {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
} | conditional_block |
window_update.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::vec::IntoIter;
// osmium
use super::CompressibleHttpFrame;
use super::FrameType;
use http2::error;
const WINDOW_SIZE_INCREMENT_BIT_MASK: u8 = 0x80;
#[derive(Debug)]
pub struct WindowUpdateFrameCompressModel {
window_size_increment: u32 | impl WindowUpdateFrameCompressModel {
pub fn new(window_size_increment: u32) -> Self {
WindowUpdateFrameCompressModel {
window_size_increment: window_size_increment
}
}
}
impl CompressibleHttpFrame for WindowUpdateFrameCompressModel {
fn get_length(&self) -> i32 {
// 4 octets for the 32 bits in the window size increment
4
}
fn get_frame_type(&self) -> FrameType {
FrameType::WindowUpdate
}
fn get_flags(&self) -> u8 {
// this frame doesn't define any flags
0
}
fn get_payload(self: Box<Self>) -> Vec<u8> {
let mut result = Vec::new();
// include the window size increment
let window_size_increment_first_octet = (self.window_size_increment >> 24) as u8;
// TODO handle error
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push(window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK);
result.push((self.window_size_increment >> 16) as u8);
result.push((self.window_size_increment >> 8) as u8);
result.push(self.window_size_increment as u8);
result
}
}
pub struct WindowUpdateFrame {
window_size_increment: u32
}
impl WindowUpdateFrame {
pub fn new_conn(frame_header: &super::FrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn new_stream(frame_header: &super::StreamFrameHeader, frame: &mut IntoIter<u8>) -> Result<Self, error::HttpError> {
if frame_header.length!= 4 {
return Err(error::HttpError::ConnectionError(
error::ErrorCode::FrameSizeError,
error::ErrorName::InvalidFrameLengthForConnectionWindowUpdateFrame
));
}
let window_size_increment_first_octet = frame.next().unwrap();
assert_eq!(0, window_size_increment_first_octet & WINDOW_SIZE_INCREMENT_BIT_MASK);
Ok(WindowUpdateFrame {
window_size_increment:
(((window_size_increment_first_octet &!WINDOW_SIZE_INCREMENT_BIT_MASK) as u32) << 24) +
((frame.next().unwrap() as u32) << 16) +
((frame.next().unwrap() as u32) << 8) +
(frame.next().unwrap() as u32)
})
}
pub fn get_window_size_increment(&self) -> u32 {
self.window_size_increment
}
} | }
| random_line_split |
panic.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate mozjs;
use std::ptr;
use mozjs::jsapi::{JSAutoRealm, JSContext, OnNewGlobalHookOption, Value};
use mozjs::jsapi::{JS_DefineFunction, JS_NewGlobalObject};
use mozjs::jsval::UndefinedValue;
use mozjs::panic::wrap_panic;
use mozjs::rust::{JSEngine, RealmOptions, Runtime, SIMPLE_GLOBAL_CLASS};
#[test]
#[should_panic]
fn test_panic() {
let engine = JSEngine::init().unwrap();
let runtime = Runtime::new(engine.handle());
let context = runtime.cx();
let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook;
let c_option = RealmOptions::default();
unsafe {
rooted!(in(context) let global = JS_NewGlobalObject(
context,
&SIMPLE_GLOBAL_CLASS,
ptr::null_mut(),
h_option,
&*c_option,
));
let _ac = JSAutoRealm::new(context, global.get());
let function = JS_DefineFunction(
context,
global.handle().into(),
b"test\0".as_ptr() as *const _,
Some(test),
0,
0,
);
assert!(!function.is_null());
rooted!(in(context) let mut rval = UndefinedValue());
let _ =
runtime.evaluate_script(global.handle(), "test();", "test.js", 0, rval.handle_mut());
}
}
unsafe extern "C" fn | (_cx: *mut JSContext, _argc: u32, _vp: *mut Value) -> bool {
let mut result = false;
wrap_panic(&mut || {
panic!();
#[allow(unreachable_code)]
{
result = true
}
});
result
}
| test | identifier_name |
panic.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate mozjs;
use std::ptr;
use mozjs::jsapi::{JSAutoRealm, JSContext, OnNewGlobalHookOption, Value};
use mozjs::jsapi::{JS_DefineFunction, JS_NewGlobalObject};
use mozjs::jsval::UndefinedValue;
use mozjs::panic::wrap_panic;
use mozjs::rust::{JSEngine, RealmOptions, Runtime, SIMPLE_GLOBAL_CLASS};
#[test]
#[should_panic]
fn test_panic() {
let engine = JSEngine::init().unwrap();
let runtime = Runtime::new(engine.handle());
let context = runtime.cx();
let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook;
let c_option = RealmOptions::default();
unsafe {
rooted!(in(context) let global = JS_NewGlobalObject(
context,
&SIMPLE_GLOBAL_CLASS,
ptr::null_mut(),
h_option,
&*c_option,
));
let _ac = JSAutoRealm::new(context, global.get());
let function = JS_DefineFunction(
context,
global.handle().into(),
b"test\0".as_ptr() as *const _,
Some(test),
0,
0,
);
assert!(!function.is_null());
rooted!(in(context) let mut rval = UndefinedValue());
let _ =
runtime.evaluate_script(global.handle(), "test();", "test.js", 0, rval.handle_mut());
}
}
unsafe extern "C" fn test(_cx: *mut JSContext, _argc: u32, _vp: *mut Value) -> bool {
let mut result = false;
wrap_panic(&mut || {
panic!();
#[allow(unreachable_code)]
{
result = true | result
} | }
}); | random_line_split |
panic.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate mozjs;
use std::ptr;
use mozjs::jsapi::{JSAutoRealm, JSContext, OnNewGlobalHookOption, Value};
use mozjs::jsapi::{JS_DefineFunction, JS_NewGlobalObject};
use mozjs::jsval::UndefinedValue;
use mozjs::panic::wrap_panic;
use mozjs::rust::{JSEngine, RealmOptions, Runtime, SIMPLE_GLOBAL_CLASS};
#[test]
#[should_panic]
fn test_panic() {
let engine = JSEngine::init().unwrap();
let runtime = Runtime::new(engine.handle());
let context = runtime.cx();
let h_option = OnNewGlobalHookOption::FireOnNewGlobalHook;
let c_option = RealmOptions::default();
unsafe {
rooted!(in(context) let global = JS_NewGlobalObject(
context,
&SIMPLE_GLOBAL_CLASS,
ptr::null_mut(),
h_option,
&*c_option,
));
let _ac = JSAutoRealm::new(context, global.get());
let function = JS_DefineFunction(
context,
global.handle().into(),
b"test\0".as_ptr() as *const _,
Some(test),
0,
0,
);
assert!(!function.is_null());
rooted!(in(context) let mut rval = UndefinedValue());
let _ =
runtime.evaluate_script(global.handle(), "test();", "test.js", 0, rval.handle_mut());
}
}
unsafe extern "C" fn test(_cx: *mut JSContext, _argc: u32, _vp: *mut Value) -> bool | {
let mut result = false;
wrap_panic(&mut || {
panic!();
#[allow(unreachable_code)]
{
result = true
}
});
result
} | identifier_body |
|
damage-chart.rs | use std::io::Write;
use world::{attack_damage, roll};
fn ev<F>(n: usize, f: F) -> f32
where
F: Fn(&mut rand::prelude::ThreadRng) -> f32,
{
let mut acc = 0.0;
let mut rng = rand::thread_rng();
for _ in 0..n {
acc += f(&mut rng);
}
acc / n as f32
}
fn expected_dmg(advantage: i32) -> f32 {
const REPEAT_ROLLS: usize = 1_000_000;
ev(REPEAT_ROLLS, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, advantage, 100);
dmg as f32 / 100.0
})
}
fn main() | });
println!("Hello, world!");
println!("Expected dmg: {}", e);
}
| {
print!(" ");
for one in 0..10 {
print!(" 0{}", one);
}
println!("");
for tens in -3..10 {
print!("{:>3}0 ", tens);
for ones in 0..10 {
let n = tens * 10 + ones;
print!("{:.3} ", expected_dmg(n));
let _ = ::std::io::stdout().flush();
}
println!("");
}
let e = ev(1_000_000, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, 0, 100);
dmg as f32 / 100.0 | identifier_body |
damage-chart.rs | use std::io::Write;
use world::{attack_damage, roll};
fn ev<F>(n: usize, f: F) -> f32
where
F: Fn(&mut rand::prelude::ThreadRng) -> f32,
{
let mut acc = 0.0;
let mut rng = rand::thread_rng();
for _ in 0..n {
acc += f(&mut rng);
}
acc / n as f32
}
fn | (advantage: i32) -> f32 {
const REPEAT_ROLLS: usize = 1_000_000;
ev(REPEAT_ROLLS, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, advantage, 100);
dmg as f32 / 100.0
})
}
fn main() {
print!(" ");
for one in 0..10 {
print!(" 0{}", one);
}
println!("");
for tens in -3..10 {
print!("{:>3}0 ", tens);
for ones in 0..10 {
let n = tens * 10 + ones;
print!("{:.3} ", expected_dmg(n));
let _ = ::std::io::stdout().flush();
}
println!("");
}
let e = ev(1_000_000, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, 0, 100);
dmg as f32 / 100.0
});
println!("Hello, world!");
println!("Expected dmg: {}", e);
}
| expected_dmg | identifier_name |
damage-chart.rs | use std::io::Write;
use world::{attack_damage, roll};
fn ev<F>(n: usize, f: F) -> f32
where
F: Fn(&mut rand::prelude::ThreadRng) -> f32,
{
let mut acc = 0.0;
let mut rng = rand::thread_rng();
for _ in 0..n {
acc += f(&mut rng);
}
| }
fn expected_dmg(advantage: i32) -> f32 {
const REPEAT_ROLLS: usize = 1_000_000;
ev(REPEAT_ROLLS, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, advantage, 100);
dmg as f32 / 100.0
})
}
fn main() {
print!(" ");
for one in 0..10 {
print!(" 0{}", one);
}
println!("");
for tens in -3..10 {
print!("{:>3}0 ", tens);
for ones in 0..10 {
let n = tens * 10 + ones;
print!("{:.3} ", expected_dmg(n));
let _ = ::std::io::stdout().flush();
}
println!("");
}
let e = ev(1_000_000, |rng| {
let roll = roll(rng);
let dmg = attack_damage(roll, 0, 100);
dmg as f32 / 100.0
});
println!("Hello, world!");
println!("Expected dmg: {}", e);
} | acc / n as f32 | random_line_split |
shootout-spectralnorm.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
use sync::{Arc, RWLock};
fn A(i: uint, j: uint) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
}
fn dot(v: &[f64], u: &[f64]) -> f64 {
let mut sum = 0.0;
for (&v_i, &u_i) in v.iter().zip(u.iter()) {
sum += v_i * u_i;
}
sum
}
fn mult(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
f: fn(&Vec<f64>, uint) -> f64) {
// We lanch in different tasks the work to be done. To finish
// this fuction, we need to wait for the completion of every
// tasks. To do that, we give to each tasks a wait_chan that we
// drop at the end of the work. At the end of this function, we
// wait until the channel hang up.
let (tx, rx) = channel();
let len = out.read().len();
let chunk = len / 100 + 1;
for chk in count(0, chunk) {
if chk >= len {break;}
let tx = tx.clone();
let v = v.clone();
let out = out.clone();
spawn(proc() {
for i in range(chk, min(len, chk + chunk)) {
let val = f(&*v.read(), i);
*out.write().get_mut(i) = val;
}
drop(tx)
});
}
// wait until the channel hang up (every task finished)
drop(tx);
for () in rx.iter() {}
}
fn mult_Av_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(i, j);
}
sum
}
fn mult_Av(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Av_impl);
}
fn mult_Atv_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(j, i);
}
sum
}
fn mult_Atv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Atv_impl);
}
fn | (v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
tmp: Arc<RWLock<Vec<f64>>>) {
mult_Av(v, tmp.clone());
mult_Atv(tmp, out);
}
fn main() {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
5500
} else if args.len() < 2 {
2000
} else {
FromStr::from_str(args[1]).unwrap()
};
let u = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let v = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let tmp = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
for _ in range(0, 10) {
mult_AtAv(u.clone(), v.clone(), tmp.clone());
mult_AtAv(v.clone(), u.clone(), tmp.clone());
}
let u = u.read();
let v = v.read();
println!("{:.9f}", (dot(u.as_slice(), v.as_slice()) /
dot(v.as_slice(), v.as_slice())).sqrt());
}
| mult_AtAv | identifier_name |
shootout-spectralnorm.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
use sync::{Arc, RWLock};
fn A(i: uint, j: uint) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
}
fn dot(v: &[f64], u: &[f64]) -> f64 {
let mut sum = 0.0;
for (&v_i, &u_i) in v.iter().zip(u.iter()) {
sum += v_i * u_i;
}
sum
}
fn mult(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
f: fn(&Vec<f64>, uint) -> f64) {
// We lanch in different tasks the work to be done. To finish
// this fuction, we need to wait for the completion of every
// tasks. To do that, we give to each tasks a wait_chan that we
// drop at the end of the work. At the end of this function, we
// wait until the channel hang up.
let (tx, rx) = channel();
let len = out.read().len();
let chunk = len / 100 + 1;
for chk in count(0, chunk) {
if chk >= len {break;}
let tx = tx.clone();
let v = v.clone();
let out = out.clone();
spawn(proc() {
for i in range(chk, min(len, chk + chunk)) {
let val = f(&*v.read(), i);
*out.write().get_mut(i) = val;
}
drop(tx)
});
}
// wait until the channel hang up (every task finished)
drop(tx);
for () in rx.iter() {}
}
fn mult_Av_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(i, j);
}
sum
}
fn mult_Av(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Av_impl);
}
fn mult_Atv_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(j, i);
}
sum
}
fn mult_Atv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Atv_impl);
}
fn mult_AtAv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
tmp: Arc<RWLock<Vec<f64>>>) {
mult_Av(v, tmp.clone());
mult_Atv(tmp, out);
}
| 2000
} else {
FromStr::from_str(args[1]).unwrap()
};
let u = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let v = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let tmp = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
for _ in range(0, 10) {
mult_AtAv(u.clone(), v.clone(), tmp.clone());
mult_AtAv(v.clone(), u.clone(), tmp.clone());
}
let u = u.read();
let v = v.read();
println!("{:.9f}", (dot(u.as_slice(), v.as_slice()) /
dot(v.as_slice(), v.as_slice())).sqrt());
} | fn main() {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
5500
} else if args.len() < 2 { | random_line_split |
shootout-spectralnorm.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
use sync::{Arc, RWLock};
fn A(i: uint, j: uint) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
}
fn dot(v: &[f64], u: &[f64]) -> f64 {
let mut sum = 0.0;
for (&v_i, &u_i) in v.iter().zip(u.iter()) {
sum += v_i * u_i;
}
sum
}
fn mult(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
f: fn(&Vec<f64>, uint) -> f64) {
// We lanch in different tasks the work to be done. To finish
// this fuction, we need to wait for the completion of every
// tasks. To do that, we give to each tasks a wait_chan that we
// drop at the end of the work. At the end of this function, we
// wait until the channel hang up.
let (tx, rx) = channel();
let len = out.read().len();
let chunk = len / 100 + 1;
for chk in count(0, chunk) {
if chk >= len {break;}
let tx = tx.clone();
let v = v.clone();
let out = out.clone();
spawn(proc() {
for i in range(chk, min(len, chk + chunk)) {
let val = f(&*v.read(), i);
*out.write().get_mut(i) = val;
}
drop(tx)
});
}
// wait until the channel hang up (every task finished)
drop(tx);
for () in rx.iter() {}
}
fn mult_Av_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(i, j);
}
sum
}
fn mult_Av(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Av_impl);
}
fn mult_Atv_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(j, i);
}
sum
}
fn mult_Atv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) |
fn mult_AtAv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
tmp: Arc<RWLock<Vec<f64>>>) {
mult_Av(v, tmp.clone());
mult_Atv(tmp, out);
}
fn main() {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() {
5500
} else if args.len() < 2 {
2000
} else {
FromStr::from_str(args[1]).unwrap()
};
let u = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let v = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let tmp = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
for _ in range(0, 10) {
mult_AtAv(u.clone(), v.clone(), tmp.clone());
mult_AtAv(v.clone(), u.clone(), tmp.clone());
}
let u = u.read();
let v = v.read();
println!("{:.9f}", (dot(u.as_slice(), v.as_slice()) /
dot(v.as_slice(), v.as_slice())).sqrt());
}
| {
mult(v, out, mult_Atv_impl);
} | identifier_body |
shootout-spectralnorm.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate sync;
use std::from_str::FromStr;
use std::iter::count;
use std::cmp::min;
use std::os;
use sync::{Arc, RWLock};
fn A(i: uint, j: uint) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
}
fn dot(v: &[f64], u: &[f64]) -> f64 {
let mut sum = 0.0;
for (&v_i, &u_i) in v.iter().zip(u.iter()) {
sum += v_i * u_i;
}
sum
}
fn mult(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
f: fn(&Vec<f64>, uint) -> f64) {
// We lanch in different tasks the work to be done. To finish
// this fuction, we need to wait for the completion of every
// tasks. To do that, we give to each tasks a wait_chan that we
// drop at the end of the work. At the end of this function, we
// wait until the channel hang up.
let (tx, rx) = channel();
let len = out.read().len();
let chunk = len / 100 + 1;
for chk in count(0, chunk) {
if chk >= len {break;}
let tx = tx.clone();
let v = v.clone();
let out = out.clone();
spawn(proc() {
for i in range(chk, min(len, chk + chunk)) {
let val = f(&*v.read(), i);
*out.write().get_mut(i) = val;
}
drop(tx)
});
}
// wait until the channel hang up (every task finished)
drop(tx);
for () in rx.iter() {}
}
fn mult_Av_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(i, j);
}
sum
}
fn mult_Av(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Av_impl);
}
fn mult_Atv_impl(v: &Vec<f64>, i: uint) -> f64 {
let mut sum = 0.;
for (j, &v_j) in v.iter().enumerate() {
sum += v_j / A(j, i);
}
sum
}
fn mult_Atv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>) {
mult(v, out, mult_Atv_impl);
}
fn mult_AtAv(v: Arc<RWLock<Vec<f64>>>, out: Arc<RWLock<Vec<f64>>>,
tmp: Arc<RWLock<Vec<f64>>>) {
mult_Av(v, tmp.clone());
mult_Atv(tmp, out);
}
fn main() {
let args = os::args();
let n = if os::getenv("RUST_BENCH").is_some() | else if args.len() < 2 {
2000
} else {
FromStr::from_str(args[1]).unwrap()
};
let u = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let v = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
let tmp = Arc::new(RWLock::new(Vec::from_elem(n, 1.)));
for _ in range(0, 10) {
mult_AtAv(u.clone(), v.clone(), tmp.clone());
mult_AtAv(v.clone(), u.clone(), tmp.clone());
}
let u = u.read();
let v = v.read();
println!("{:.9f}", (dot(u.as_slice(), v.as_slice()) /
dot(v.as_slice(), v.as_slice())).sqrt());
}
| {
5500
} | conditional_block |
keychain.rs | use std::collections::HashMap;
use super::operations::BurnchainOpSigner;
use stacks::chainstate::stacks::{StacksTransactionSigner, TransactionAuth, StacksPublicKey, StacksPrivateKey, StacksAddress};
use stacks::address::AddressHashMode;
use stacks::burnchains::{BurnchainSigner, PrivateKey};
use stacks::util::vrf::{VRF, VRFProof, VRFPublicKey, VRFPrivateKey};
use stacks::util::hash::{Sha256Sum};
#[derive(Clone)]
pub struct Keychain {
secret_keys: Vec<StacksPrivateKey>,
threshold: u16,
hash_mode: AddressHashMode,
pub hashed_secret_state: Sha256Sum,
microblocks_secret_keys: Vec<StacksPrivateKey>,
vrf_secret_keys: Vec<VRFPrivateKey>,
vrf_map: HashMap<VRFPublicKey, VRFPrivateKey>,
}
impl Keychain {
pub fn new(secret_keys: Vec<StacksPrivateKey>, threshold: u16, hash_mode: AddressHashMode) -> Keychain {
// Compute hashed secret state
let hashed_secret_state = {
let mut buf : Vec<u8> = secret_keys.iter()
.flat_map(|sk| sk.to_bytes())
.collect();
buf.extend_from_slice(&[(threshold >> 8) as u8, (threshold & 0xff) as u8, hash_mode as u8]);
Sha256Sum::from_data(&buf[..])
};
Self {
hash_mode,
hashed_secret_state,
microblocks_secret_keys: vec![],
secret_keys,
threshold,
vrf_secret_keys: vec![],
vrf_map: HashMap::new(),
}
}
pub fn default(seed: Vec<u8>) -> Keychain {
let mut re_hashed_seed = seed;
let secret_key = loop {
match StacksPrivateKey::from_slice(&re_hashed_seed[..]) {
Ok(sk) => break sk,
Err(_) => re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]).as_bytes().to_vec()
}
};
let threshold = 1;
let hash_mode = AddressHashMode::SerializeP2PKH;
Keychain::new(vec![secret_key], threshold, hash_mode)
}
pub fn rotate_vrf_keypair(&mut self, block_height: u64) -> VRFPublicKey {
let mut seed = {
let mut secret_state = self.hashed_secret_state.to_bytes().to_vec();
secret_state.extend_from_slice(&block_height.to_be_bytes()[..]);
Sha256Sum::from_data(&secret_state)
};
// Not every 256-bit number is a valid Ed25519 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let sk = loop {
match VRFPrivateKey::from_bytes(seed.as_bytes()) {
Some(sk) => break sk,
None => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
let pk = VRFPublicKey::from_private(&sk);
self.vrf_secret_keys.push(sk.clone());
self.vrf_map.insert(pk.clone(), sk);
pk
}
pub fn rotate_microblock_keypair(&mut self) -> StacksPrivateKey {
let mut seed = match self.microblocks_secret_keys.last() {
// First key is the hash of the secret state
None => self.hashed_secret_state,
// Next key is the hash of the last
Some(last_sk) => Sha256Sum::from_data(&last_sk.to_bytes()[..]),
};
// Not every 256-bit number is a valid secp256k1 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let mut sk = loop {
match StacksPrivateKey::from_slice(&seed.to_bytes()[..]) {
Ok(sk) => break sk,
Err(_) => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
sk.set_compress_public(true);
self.microblocks_secret_keys.push(sk.clone());
sk
}
pub fn get_microblock_key(&self) -> Option<StacksPrivateKey> {
self.microblocks_secret_keys.last().cloned()
}
pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () {
let num_keys = if self.secret_keys.len() < self.threshold as usize {
self.secret_keys.len()
} else {
self.threshold as usize
};
for i in 0..num_keys {
tx_signer.sign_origin(&self.secret_keys[i]).unwrap();
}
}
/// Given a VRF public key, generates a VRF Proof
pub fn generate_proof(&self, vrf_pk: &VRFPublicKey, bytes: &[u8; 32]) -> Option<VRFProof> {
// Retrieve the corresponding VRF secret key
let vrf_sk = match self.vrf_map.get(vrf_pk) {
Some(vrf_pk) => vrf_pk,
None => return None
};
// Generate the proof
let proof = VRF::prove(&vrf_sk, &bytes.to_vec());
// Ensure that the proof is valid by verifying
let is_valid = match VRF::verify(vrf_pk, &proof, &bytes.to_vec()) {
Ok(v) => v,
Err(_) => false
};
assert!(is_valid);
Some(proof)
}
/// Given the keychain's secret keys, computes and returns the corresponding Stack address.
/// Note: Testnet bit is hardcoded.
pub fn get_address(&self) -> StacksAddress {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
StacksAddress::from_public_keys(
self.hash_mode.to_version_testnet(),
&self.hash_mode,
self.threshold as usize,
&public_keys).unwrap()
}
pub fn address_from_burnchain_signer(signer: &BurnchainSigner) -> StacksAddress |
pub fn get_burnchain_signer(&self) -> BurnchainSigner {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
BurnchainSigner {
hash_mode: self.hash_mode,
num_sigs: self.threshold as usize,
public_keys
}
}
pub fn get_transaction_auth(&self) -> Option<TransactionAuth> {
match self.hash_mode {
AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2SH => TransactionAuth::from_p2sh(&self.secret_keys, self.threshold),
AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2WSH => TransactionAuth::from_p2wsh(&self.secret_keys, self.threshold),
}
}
pub fn origin_address(&self) -> Option<StacksAddress> {
match self.get_transaction_auth() {
// Note: testnet hard-coded
Some(auth) => Some(auth.origin().address_testnet()),
None => None
}
}
pub fn generate_op_signer(&self) -> BurnchainOpSigner {
BurnchainOpSigner::new(self.secret_keys[0], false)
}
}
| {
StacksAddress::from_public_keys(
signer.hash_mode.to_version_testnet(),
&signer.hash_mode,
signer.num_sigs,
&signer.public_keys).unwrap()
} | identifier_body |
keychain.rs | use std::collections::HashMap;
use super::operations::BurnchainOpSigner;
use stacks::chainstate::stacks::{StacksTransactionSigner, TransactionAuth, StacksPublicKey, StacksPrivateKey, StacksAddress};
use stacks::address::AddressHashMode;
use stacks::burnchains::{BurnchainSigner, PrivateKey};
use stacks::util::vrf::{VRF, VRFProof, VRFPublicKey, VRFPrivateKey};
use stacks::util::hash::{Sha256Sum};
#[derive(Clone)]
pub struct | {
secret_keys: Vec<StacksPrivateKey>,
threshold: u16,
hash_mode: AddressHashMode,
pub hashed_secret_state: Sha256Sum,
microblocks_secret_keys: Vec<StacksPrivateKey>,
vrf_secret_keys: Vec<VRFPrivateKey>,
vrf_map: HashMap<VRFPublicKey, VRFPrivateKey>,
}
impl Keychain {
pub fn new(secret_keys: Vec<StacksPrivateKey>, threshold: u16, hash_mode: AddressHashMode) -> Keychain {
// Compute hashed secret state
let hashed_secret_state = {
let mut buf : Vec<u8> = secret_keys.iter()
.flat_map(|sk| sk.to_bytes())
.collect();
buf.extend_from_slice(&[(threshold >> 8) as u8, (threshold & 0xff) as u8, hash_mode as u8]);
Sha256Sum::from_data(&buf[..])
};
Self {
hash_mode,
hashed_secret_state,
microblocks_secret_keys: vec![],
secret_keys,
threshold,
vrf_secret_keys: vec![],
vrf_map: HashMap::new(),
}
}
pub fn default(seed: Vec<u8>) -> Keychain {
let mut re_hashed_seed = seed;
let secret_key = loop {
match StacksPrivateKey::from_slice(&re_hashed_seed[..]) {
Ok(sk) => break sk,
Err(_) => re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]).as_bytes().to_vec()
}
};
let threshold = 1;
let hash_mode = AddressHashMode::SerializeP2PKH;
Keychain::new(vec![secret_key], threshold, hash_mode)
}
pub fn rotate_vrf_keypair(&mut self, block_height: u64) -> VRFPublicKey {
let mut seed = {
let mut secret_state = self.hashed_secret_state.to_bytes().to_vec();
secret_state.extend_from_slice(&block_height.to_be_bytes()[..]);
Sha256Sum::from_data(&secret_state)
};
// Not every 256-bit number is a valid Ed25519 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let sk = loop {
match VRFPrivateKey::from_bytes(seed.as_bytes()) {
Some(sk) => break sk,
None => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
let pk = VRFPublicKey::from_private(&sk);
self.vrf_secret_keys.push(sk.clone());
self.vrf_map.insert(pk.clone(), sk);
pk
}
pub fn rotate_microblock_keypair(&mut self) -> StacksPrivateKey {
let mut seed = match self.microblocks_secret_keys.last() {
// First key is the hash of the secret state
None => self.hashed_secret_state,
// Next key is the hash of the last
Some(last_sk) => Sha256Sum::from_data(&last_sk.to_bytes()[..]),
};
// Not every 256-bit number is a valid secp256k1 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let mut sk = loop {
match StacksPrivateKey::from_slice(&seed.to_bytes()[..]) {
Ok(sk) => break sk,
Err(_) => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
sk.set_compress_public(true);
self.microblocks_secret_keys.push(sk.clone());
sk
}
pub fn get_microblock_key(&self) -> Option<StacksPrivateKey> {
self.microblocks_secret_keys.last().cloned()
}
pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () {
let num_keys = if self.secret_keys.len() < self.threshold as usize {
self.secret_keys.len()
} else {
self.threshold as usize
};
for i in 0..num_keys {
tx_signer.sign_origin(&self.secret_keys[i]).unwrap();
}
}
/// Given a VRF public key, generates a VRF Proof
pub fn generate_proof(&self, vrf_pk: &VRFPublicKey, bytes: &[u8; 32]) -> Option<VRFProof> {
// Retrieve the corresponding VRF secret key
let vrf_sk = match self.vrf_map.get(vrf_pk) {
Some(vrf_pk) => vrf_pk,
None => return None
};
// Generate the proof
let proof = VRF::prove(&vrf_sk, &bytes.to_vec());
// Ensure that the proof is valid by verifying
let is_valid = match VRF::verify(vrf_pk, &proof, &bytes.to_vec()) {
Ok(v) => v,
Err(_) => false
};
assert!(is_valid);
Some(proof)
}
/// Given the keychain's secret keys, computes and returns the corresponding Stack address.
/// Note: Testnet bit is hardcoded.
pub fn get_address(&self) -> StacksAddress {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
StacksAddress::from_public_keys(
self.hash_mode.to_version_testnet(),
&self.hash_mode,
self.threshold as usize,
&public_keys).unwrap()
}
pub fn address_from_burnchain_signer(signer: &BurnchainSigner) -> StacksAddress {
StacksAddress::from_public_keys(
signer.hash_mode.to_version_testnet(),
&signer.hash_mode,
signer.num_sigs,
&signer.public_keys).unwrap()
}
pub fn get_burnchain_signer(&self) -> BurnchainSigner {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
BurnchainSigner {
hash_mode: self.hash_mode,
num_sigs: self.threshold as usize,
public_keys
}
}
pub fn get_transaction_auth(&self) -> Option<TransactionAuth> {
match self.hash_mode {
AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2SH => TransactionAuth::from_p2sh(&self.secret_keys, self.threshold),
AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2WSH => TransactionAuth::from_p2wsh(&self.secret_keys, self.threshold),
}
}
pub fn origin_address(&self) -> Option<StacksAddress> {
match self.get_transaction_auth() {
// Note: testnet hard-coded
Some(auth) => Some(auth.origin().address_testnet()),
None => None
}
}
pub fn generate_op_signer(&self) -> BurnchainOpSigner {
BurnchainOpSigner::new(self.secret_keys[0], false)
}
}
| Keychain | identifier_name |
keychain.rs | use std::collections::HashMap;
use super::operations::BurnchainOpSigner;
use stacks::chainstate::stacks::{StacksTransactionSigner, TransactionAuth, StacksPublicKey, StacksPrivateKey, StacksAddress};
use stacks::address::AddressHashMode;
use stacks::burnchains::{BurnchainSigner, PrivateKey};
use stacks::util::vrf::{VRF, VRFProof, VRFPublicKey, VRFPrivateKey};
use stacks::util::hash::{Sha256Sum};
#[derive(Clone)]
pub struct Keychain {
secret_keys: Vec<StacksPrivateKey>,
threshold: u16,
hash_mode: AddressHashMode,
pub hashed_secret_state: Sha256Sum,
microblocks_secret_keys: Vec<StacksPrivateKey>,
vrf_secret_keys: Vec<VRFPrivateKey>,
vrf_map: HashMap<VRFPublicKey, VRFPrivateKey>,
}
impl Keychain {
pub fn new(secret_keys: Vec<StacksPrivateKey>, threshold: u16, hash_mode: AddressHashMode) -> Keychain {
// Compute hashed secret state
let hashed_secret_state = {
let mut buf : Vec<u8> = secret_keys.iter()
.flat_map(|sk| sk.to_bytes())
.collect();
buf.extend_from_slice(&[(threshold >> 8) as u8, (threshold & 0xff) as u8, hash_mode as u8]);
Sha256Sum::from_data(&buf[..])
};
Self {
hash_mode,
hashed_secret_state,
microblocks_secret_keys: vec![],
secret_keys,
threshold,
vrf_secret_keys: vec![],
vrf_map: HashMap::new(),
}
}
pub fn default(seed: Vec<u8>) -> Keychain {
let mut re_hashed_seed = seed;
let secret_key = loop {
match StacksPrivateKey::from_slice(&re_hashed_seed[..]) {
Ok(sk) => break sk,
Err(_) => re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]).as_bytes().to_vec()
}
};
let threshold = 1;
let hash_mode = AddressHashMode::SerializeP2PKH;
Keychain::new(vec![secret_key], threshold, hash_mode)
}
pub fn rotate_vrf_keypair(&mut self, block_height: u64) -> VRFPublicKey {
let mut seed = {
let mut secret_state = self.hashed_secret_state.to_bytes().to_vec();
secret_state.extend_from_slice(&block_height.to_be_bytes()[..]);
Sha256Sum::from_data(&secret_state)
};
// Not every 256-bit number is a valid Ed25519 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let sk = loop {
match VRFPrivateKey::from_bytes(seed.as_bytes()) {
Some(sk) => break sk,
None => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
let pk = VRFPublicKey::from_private(&sk);
self.vrf_secret_keys.push(sk.clone());
self.vrf_map.insert(pk.clone(), sk);
pk
}
pub fn rotate_microblock_keypair(&mut self) -> StacksPrivateKey {
let mut seed = match self.microblocks_secret_keys.last() {
// First key is the hash of the secret state
None => self.hashed_secret_state,
// Next key is the hash of the last
Some(last_sk) => Sha256Sum::from_data(&last_sk.to_bytes()[..]),
};
// Not every 256-bit number is a valid secp256k1 secret key.
// As such, we continuously generate seeds through re-hashing until one works.
let mut sk = loop {
match StacksPrivateKey::from_slice(&seed.to_bytes()[..]) {
Ok(sk) => break sk,
Err(_) => seed = Sha256Sum::from_data(seed.as_bytes())
}
};
sk.set_compress_public(true);
self.microblocks_secret_keys.push(sk.clone());
sk
}
pub fn get_microblock_key(&self) -> Option<StacksPrivateKey> {
self.microblocks_secret_keys.last().cloned()
}
pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () {
let num_keys = if self.secret_keys.len() < self.threshold as usize {
self.secret_keys.len()
} else {
self.threshold as usize
};
for i in 0..num_keys {
tx_signer.sign_origin(&self.secret_keys[i]).unwrap();
}
}
/// Given a VRF public key, generates a VRF Proof
pub fn generate_proof(&self, vrf_pk: &VRFPublicKey, bytes: &[u8; 32]) -> Option<VRFProof> {
// Retrieve the corresponding VRF secret key
let vrf_sk = match self.vrf_map.get(vrf_pk) {
Some(vrf_pk) => vrf_pk,
None => return None
};
// Generate the proof
let proof = VRF::prove(&vrf_sk, &bytes.to_vec());
// Ensure that the proof is valid by verifying
let is_valid = match VRF::verify(vrf_pk, &proof, &bytes.to_vec()) {
Ok(v) => v,
Err(_) => false
};
assert!(is_valid);
Some(proof)
}
/// Given the keychain's secret keys, computes and returns the corresponding Stack address.
/// Note: Testnet bit is hardcoded.
pub fn get_address(&self) -> StacksAddress {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
StacksAddress::from_public_keys(
self.hash_mode.to_version_testnet(),
&self.hash_mode, | }
pub fn address_from_burnchain_signer(signer: &BurnchainSigner) -> StacksAddress {
StacksAddress::from_public_keys(
signer.hash_mode.to_version_testnet(),
&signer.hash_mode,
signer.num_sigs,
&signer.public_keys).unwrap()
}
pub fn get_burnchain_signer(&self) -> BurnchainSigner {
let public_keys = self.secret_keys.iter().map(|ref pk| StacksPublicKey::from_private(pk)).collect();
BurnchainSigner {
hash_mode: self.hash_mode,
num_sigs: self.threshold as usize,
public_keys
}
}
pub fn get_transaction_auth(&self) -> Option<TransactionAuth> {
match self.hash_mode {
AddressHashMode::SerializeP2PKH => TransactionAuth::from_p2pkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2SH => TransactionAuth::from_p2sh(&self.secret_keys, self.threshold),
AddressHashMode::SerializeP2WPKH => TransactionAuth::from_p2wpkh(&self.secret_keys[0]),
AddressHashMode::SerializeP2WSH => TransactionAuth::from_p2wsh(&self.secret_keys, self.threshold),
}
}
pub fn origin_address(&self) -> Option<StacksAddress> {
match self.get_transaction_auth() {
// Note: testnet hard-coded
Some(auth) => Some(auth.origin().address_testnet()),
None => None
}
}
pub fn generate_op_signer(&self) -> BurnchainOpSigner {
BurnchainOpSigner::new(self.secret_keys[0], false)
}
} | self.threshold as usize,
&public_keys).unwrap() | random_line_split |
params-on-stack.rs | // build-fail
// compile-flags: --target thumbv8m.main-none-eabi --crate-type lib
// needs-llvm-components: arm
#![feature(abi_c_cmse_nonsecure_call, no_core, lang_items, intrinsics)]
#![no_core]
#[lang="sized"]
pub trait Sized { }
#[lang="copy"]
pub trait Copy { }
extern "rust-intrinsic" {
pub fn transmute<T, U>(e: T) -> U;
}
#[no_mangle]
pub fn | (a: u32, b: u32, c: u32, d: u32, e: u32) -> u32 {
let non_secure_function = unsafe {
transmute::<
usize,
extern "C-cmse-nonsecure-call" fn(u32, u32, u32, u32, u32) -> u32>
(
0x10000004,
)
};
non_secure_function(a, b, c, d, e)
}
| test | identifier_name |
params-on-stack.rs | // build-fail
// compile-flags: --target thumbv8m.main-none-eabi --crate-type lib | #[lang="sized"]
pub trait Sized { }
#[lang="copy"]
pub trait Copy { }
extern "rust-intrinsic" {
pub fn transmute<T, U>(e: T) -> U;
}
#[no_mangle]
pub fn test(a: u32, b: u32, c: u32, d: u32, e: u32) -> u32 {
let non_secure_function = unsafe {
transmute::<
usize,
extern "C-cmse-nonsecure-call" fn(u32, u32, u32, u32, u32) -> u32>
(
0x10000004,
)
};
non_secure_function(a, b, c, d, e)
} | // needs-llvm-components: arm
#![feature(abi_c_cmse_nonsecure_call, no_core, lang_items, intrinsics)]
#![no_core] | random_line_split |
params-on-stack.rs | // build-fail
// compile-flags: --target thumbv8m.main-none-eabi --crate-type lib
// needs-llvm-components: arm
#![feature(abi_c_cmse_nonsecure_call, no_core, lang_items, intrinsics)]
#![no_core]
#[lang="sized"]
pub trait Sized { }
#[lang="copy"]
pub trait Copy { }
extern "rust-intrinsic" {
pub fn transmute<T, U>(e: T) -> U;
}
#[no_mangle]
pub fn test(a: u32, b: u32, c: u32, d: u32, e: u32) -> u32 | {
let non_secure_function = unsafe {
transmute::<
usize,
extern "C-cmse-nonsecure-call" fn(u32, u32, u32, u32, u32) -> u32>
(
0x10000004,
)
};
non_secure_function(a, b, c, d, e)
} | identifier_body |
|
filter.rs | use std::default::Default;
use std::str;
use nom::IResult;
use nom::{be_u8, digit, is_alphabetic, is_alphanumeric, is_hex_digit};
use lber::common::TagClass;
use lber::structures::{Boolean, ExplicitTag, OctetString, Sequence, Tag};
#[doc(hidden)]
pub fn parse(input: &str) -> Result<Tag, ()> {
match filtexpr(input.as_bytes()) {
IResult::Done(r, t) => {
if r.is_empty() {
Ok(t)
} else {
Err(())
}
},
IResult::Error(_) | IResult::Incomplete(_) => Err(()),
}
}
const AND_FILT: u64 = 0;
const OR_FILT: u64 = 1;
const NOT_FILT: u64 = 2;
const EQ_MATCH: u64 = 3;
const SUBSTR_MATCH: u64 = 4;
const GTE_MATCH: u64 = 5;
const LTE_MATCH: u64 = 6;
const PRES_MATCH: u64 = 7;
const APPROX_MATCH: u64 = 8;
const EXT_MATCH: u64 = 9;
const SUB_INITIAL: u64 = 0;
const SUB_ANY: u64 = 1;
const SUB_FINAL: u64 = 2;
named!(filtexpr<Tag>, alt!(filter | item));
named!(filter<Tag>, delimited!(char!('('), filtercomp, char!(')')));
named!(filtercomp<Tag>, alt!(and | or | not | item));
named!(filterlist<Vec<Tag>>, many0!(filter));
named!(and<Tag>, map!(preceded!(char!('&'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: AND_FILT,
inner: tagv,
})
}
));
named!(or<Tag>, map!(preceded!(char!('|'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: OR_FILT,
inner: tagv,
})
}
));
named!(not<Tag>, map!(preceded!(char!('!'), filter),
|tag: Tag| -> Tag {
Tag::ExplicitTag(ExplicitTag {
class: TagClass::Context,
id: NOT_FILT,
inner: Box::new(tag),
})
}
));
named!(item<Tag>, alt!(eq | non_eq | extensible));
enum Unescaper {
WantFirst,
WantSecond(u8),
Value(u8),
Error,
}
impl Unescaper {
fn feed(&self, c: u8) -> Unescaper {
match *self {
Unescaper::Error => Unescaper::Error,
Unescaper::WantFirst => {
if is_hex_digit(c) {
Unescaper::WantSecond(c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 })
} else {
Unescaper::Error
}
},
Unescaper::WantSecond(partial) => {
if is_hex_digit(c) {
Unescaper::Value((partial << 4) + (c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 }))
} else {
Unescaper::Error
}
},
Unescaper::Value(_v) => if c!= b'\\' { Unescaper::Value(c) } else { Unescaper::WantFirst },
}
}
}
// Any byte in the assertion value may be represented by \NN, where N is a hex digit.
// Some characters must be represented in this way: parentheses, asterisk and backslash
// itself.
named!(unescaped<Vec<u8>>, map_res!(fold_many0!(
verify!(be_u8, is_value_char),
(Unescaper::Value(0), Vec::new()),
|(mut u, mut vec): (Unescaper, Vec<_>), c: u8| {
u = u.feed(c);
if let Unescaper::Value(c) = u {
vec.push(c);
}
(u, vec)
}), |(u, vec): (Unescaper, Vec<_>)| -> Result<Vec<u8>, ()> {
if let Unescaper::Value(_) = u {
Ok(vec)
} else {
Err(())
}
}
));
named!(non_eq<Tag>, do_parse!(
attr: attributedescription >>
filterop: alt!(tag!(">=") | tag!("<=") | tag!("~=")) >>
value: unescaped >> ({
Tag::Sequence(Sequence {
class: TagClass::Context,
id: filtertag(filterop),
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: value,
.. Default::default()
})
]
})
})
));
fn filtertag(filterop: &[u8]) -> u64 {
match filterop {
b">=" => GTE_MATCH,
b"<=" => LTE_MATCH,
b"~=" => APPROX_MATCH,
_ => unimplemented!(),
}
}
named!(eq<Tag>, do_parse!(
attr: attributedescription >>
tag!("=") >>
initial: unescaped >>
mid_final: map_res!(many0!(preceded!(tag!("*"), unescaped)), |v: Vec<Vec<u8>>| -> Result<Vec<Vec<u8>>, ()> {
// an empty element may exist only at the very end; otherwise, we have two adjacent asterisks
if v.iter().enumerate().fold(false, |acc, (n, ve)| acc || ve.is_empty() && n + 1!= v.len()) {
Err(())
} else {
Ok(v)
}
}) >> ({
if mid_final.is_empty() {
// simple equality, no asterisks in assertion value
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EQ_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: initial,
.. Default::default()
})
]
})
} else if initial.is_empty() && mid_final.len() == 1 && mid_final[0].is_empty() {
// presence, single asterisk in assertion value
Tag::OctetString(OctetString {
class: TagClass::Context,
id: PRES_MATCH,
inner: attr.to_vec(),
})
} else {
// substring match
let mut inner = vec![];
if!initial.is_empty() {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: SUB_INITIAL,
inner: initial,
}));
}
let n = mid_final.len();
for (i, sub_elem) in mid_final.into_iter().enumerate() {
if sub_elem.is_empty() {
break;
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: if i + 1!= n { SUB_ANY } else { SUB_FINAL },
inner: sub_elem,
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: SUBSTR_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::Sequence(Sequence {
inner: inner,
.. Default::default()
})
]
})
}
})
));
fn is_value_char(c: u8) -> bool {
c!= 0 && c!= b'(' && c!= b')' && c!= b'*'
}
named!(extensible<Tag>, alt!(attr_dn_mrule | dn_mrule));
named!(attr_dn_mrule<Tag>, do_parse!(
attr: attributedescription >>
dn: opt!(tag!(":dn")) >>
mrule: opt!(preceded!(char!(':'), attributetype)) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(mrule, Some(attr), value, dn.is_some()))
));
named!(dn_mrule<Tag>, do_parse!(
dn: opt!(tag!(":dn")) >>
mrule: preceded!(char!(':'), attributetype) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(Some(mrule), None, value, dn.is_some()))
));
fn extensible_tag(mrule: Option<&[u8]>, attr: Option<&[u8]>, value: Vec<u8>, dn: bool) -> Tag {
let mut inner = vec![];
if let Some(mrule) = mrule {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 1,
inner: mrule.to_vec()
}));
}
if let Some(attr) = attr |
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 3,
inner: value
}));
if dn {
inner.push(Tag::Boolean(Boolean {
class: TagClass::Context,
id: 4,
inner: dn
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EXT_MATCH,
inner: inner
})
}
named!(attributedescription<&[u8]>, recognize!(do_parse!(
_type: attributetype >>
_opts: many0!(preceded!(char!(';'), take_while1!(is_alnum_hyphen))) >> ()
)));
named!(attributetype<&[u8]>, alt!(numericoid | descr));
named!(numericoid<&[u8]>, recognize!(
do_parse!(
_leading: number >>
_rest: many0!(preceded!(char!('.'), number)) >> ()
)
));
// A number may be zero, but must not have superfluous leading zeroes
named!(number<&[u8]>, verify!(digit, |d: &[u8]| d.len() == 1 || d[0]!= b'0'));
named!(descr<&[u8]>, recognize!(
do_parse!(
_leading: verify!(be_u8, |a: u8| is_alphabetic(a)) >>
_rest: take_while!(is_alnum_hyphen) >> ()
)
));
fn is_alnum_hyphen(c: u8) -> bool {
is_alphanumeric(c) || c == b'-'
}
| {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 2,
inner: attr.to_vec()
}));
} | conditional_block |
filter.rs | use std::default::Default;
use std::str;
use nom::IResult;
use nom::{be_u8, digit, is_alphabetic, is_alphanumeric, is_hex_digit};
use lber::common::TagClass;
use lber::structures::{Boolean, ExplicitTag, OctetString, Sequence, Tag};
#[doc(hidden)]
pub fn parse(input: &str) -> Result<Tag, ()> {
match filtexpr(input.as_bytes()) {
IResult::Done(r, t) => {
if r.is_empty() {
Ok(t)
} else {
Err(())
}
},
IResult::Error(_) | IResult::Incomplete(_) => Err(()),
}
}
const AND_FILT: u64 = 0;
const OR_FILT: u64 = 1;
const NOT_FILT: u64 = 2;
const EQ_MATCH: u64 = 3;
const SUBSTR_MATCH: u64 = 4;
const GTE_MATCH: u64 = 5;
const LTE_MATCH: u64 = 6;
const PRES_MATCH: u64 = 7;
const APPROX_MATCH: u64 = 8;
const EXT_MATCH: u64 = 9;
const SUB_INITIAL: u64 = 0;
const SUB_ANY: u64 = 1;
const SUB_FINAL: u64 = 2;
named!(filtexpr<Tag>, alt!(filter | item));
named!(filter<Tag>, delimited!(char!('('), filtercomp, char!(')')));
named!(filtercomp<Tag>, alt!(and | or | not | item));
named!(filterlist<Vec<Tag>>, many0!(filter));
named!(and<Tag>, map!(preceded!(char!('&'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: AND_FILT,
inner: tagv,
})
}
));
named!(or<Tag>, map!(preceded!(char!('|'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: OR_FILT,
inner: tagv,
})
}
));
named!(not<Tag>, map!(preceded!(char!('!'), filter),
|tag: Tag| -> Tag {
Tag::ExplicitTag(ExplicitTag {
class: TagClass::Context,
id: NOT_FILT,
inner: Box::new(tag),
})
}
));
named!(item<Tag>, alt!(eq | non_eq | extensible));
enum Unescaper {
WantFirst,
WantSecond(u8),
Value(u8),
Error,
}
impl Unescaper {
fn feed(&self, c: u8) -> Unescaper {
match *self {
Unescaper::Error => Unescaper::Error,
Unescaper::WantFirst => {
if is_hex_digit(c) {
Unescaper::WantSecond(c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 })
} else {
Unescaper::Error
}
},
Unescaper::WantSecond(partial) => {
if is_hex_digit(c) {
Unescaper::Value((partial << 4) + (c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 }))
} else {
Unescaper::Error
}
},
Unescaper::Value(_v) => if c!= b'\\' { Unescaper::Value(c) } else { Unescaper::WantFirst },
}
}
}
// Any byte in the assertion value may be represented by \NN, where N is a hex digit.
// Some characters must be represented in this way: parentheses, asterisk and backslash
// itself.
named!(unescaped<Vec<u8>>, map_res!(fold_many0!(
verify!(be_u8, is_value_char),
(Unescaper::Value(0), Vec::new()),
|(mut u, mut vec): (Unescaper, Vec<_>), c: u8| {
u = u.feed(c);
if let Unescaper::Value(c) = u {
vec.push(c);
}
(u, vec)
}), |(u, vec): (Unescaper, Vec<_>)| -> Result<Vec<u8>, ()> {
if let Unescaper::Value(_) = u {
Ok(vec)
} else {
Err(())
}
}
));
named!(non_eq<Tag>, do_parse!(
attr: attributedescription >>
filterop: alt!(tag!(">=") | tag!("<=") | tag!("~=")) >>
value: unescaped >> ({
Tag::Sequence(Sequence {
class: TagClass::Context,
id: filtertag(filterop),
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: value,
.. Default::default()
})
]
})
})
));
fn filtertag(filterop: &[u8]) -> u64 {
match filterop {
b">=" => GTE_MATCH,
b"<=" => LTE_MATCH,
b"~=" => APPROX_MATCH,
_ => unimplemented!(),
}
}
named!(eq<Tag>, do_parse!(
attr: attributedescription >>
tag!("=") >>
initial: unescaped >>
mid_final: map_res!(many0!(preceded!(tag!("*"), unescaped)), |v: Vec<Vec<u8>>| -> Result<Vec<Vec<u8>>, ()> {
// an empty element may exist only at the very end; otherwise, we have two adjacent asterisks
if v.iter().enumerate().fold(false, |acc, (n, ve)| acc || ve.is_empty() && n + 1!= v.len()) {
Err(())
} else {
Ok(v)
}
}) >> ({
if mid_final.is_empty() {
// simple equality, no asterisks in assertion value
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EQ_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: initial,
.. Default::default()
})
]
})
} else if initial.is_empty() && mid_final.len() == 1 && mid_final[0].is_empty() {
// presence, single asterisk in assertion value
Tag::OctetString(OctetString {
class: TagClass::Context,
id: PRES_MATCH,
inner: attr.to_vec(),
})
} else {
// substring match
let mut inner = vec![];
if!initial.is_empty() {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: SUB_INITIAL,
inner: initial,
}));
}
let n = mid_final.len();
for (i, sub_elem) in mid_final.into_iter().enumerate() {
if sub_elem.is_empty() {
break;
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: if i + 1!= n { SUB_ANY } else { SUB_FINAL },
inner: sub_elem,
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: SUBSTR_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::Sequence(Sequence {
inner: inner,
.. Default::default()
})
]
})
}
})
));
fn is_value_char(c: u8) -> bool {
c!= 0 && c!= b'(' && c!= b')' && c!= b'*'
}
named!(extensible<Tag>, alt!(attr_dn_mrule | dn_mrule));
named!(attr_dn_mrule<Tag>, do_parse!(
attr: attributedescription >>
dn: opt!(tag!(":dn")) >>
mrule: opt!(preceded!(char!(':'), attributetype)) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(mrule, Some(attr), value, dn.is_some()))
));
named!(dn_mrule<Tag>, do_parse!(
dn: opt!(tag!(":dn")) >>
mrule: preceded!(char!(':'), attributetype) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(Some(mrule), None, value, dn.is_some()))
));
fn extensible_tag(mrule: Option<&[u8]>, attr: Option<&[u8]>, value: Vec<u8>, dn: bool) -> Tag | }));
if dn {
inner.push(Tag::Boolean(Boolean {
class: TagClass::Context,
id: 4,
inner: dn
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EXT_MATCH,
inner: inner
})
}
named!(attributedescription<&[u8]>, recognize!(do_parse!(
_type: attributetype >>
_opts: many0!(preceded!(char!(';'), take_while1!(is_alnum_hyphen))) >> ()
)));
named!(attributetype<&[u8]>, alt!(numericoid | descr));
named!(numericoid<&[u8]>, recognize!(
do_parse!(
_leading: number >>
_rest: many0!(preceded!(char!('.'), number)) >> ()
)
));
// A number may be zero, but must not have superfluous leading zeroes
named!(number<&[u8]>, verify!(digit, |d: &[u8]| d.len() == 1 || d[0]!= b'0'));
named!(descr<&[u8]>, recognize!(
do_parse!(
_leading: verify!(be_u8, |a: u8| is_alphabetic(a)) >>
_rest: take_while!(is_alnum_hyphen) >> ()
)
));
fn is_alnum_hyphen(c: u8) -> bool {
is_alphanumeric(c) || c == b'-'
}
| {
let mut inner = vec![];
if let Some(mrule) = mrule {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 1,
inner: mrule.to_vec()
}));
}
if let Some(attr) = attr {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 2,
inner: attr.to_vec()
}));
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 3,
inner: value | identifier_body |
filter.rs | use std::default::Default;
use std::str;
use nom::IResult;
use nom::{be_u8, digit, is_alphabetic, is_alphanumeric, is_hex_digit};
use lber::common::TagClass;
use lber::structures::{Boolean, ExplicitTag, OctetString, Sequence, Tag};
#[doc(hidden)]
pub fn parse(input: &str) -> Result<Tag, ()> {
match filtexpr(input.as_bytes()) {
IResult::Done(r, t) => {
if r.is_empty() {
Ok(t)
} else {
Err(())
}
},
IResult::Error(_) | IResult::Incomplete(_) => Err(()),
}
}
const AND_FILT: u64 = 0;
const OR_FILT: u64 = 1;
const NOT_FILT: u64 = 2;
const EQ_MATCH: u64 = 3;
const SUBSTR_MATCH: u64 = 4;
const GTE_MATCH: u64 = 5;
const LTE_MATCH: u64 = 6;
const PRES_MATCH: u64 = 7;
const APPROX_MATCH: u64 = 8;
const EXT_MATCH: u64 = 9;
const SUB_INITIAL: u64 = 0;
const SUB_ANY: u64 = 1;
const SUB_FINAL: u64 = 2;
named!(filtexpr<Tag>, alt!(filter | item));
named!(filter<Tag>, delimited!(char!('('), filtercomp, char!(')')));
named!(filtercomp<Tag>, alt!(and | or | not | item));
named!(filterlist<Vec<Tag>>, many0!(filter));
named!(and<Tag>, map!(preceded!(char!('&'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: AND_FILT,
inner: tagv,
})
}
));
named!(or<Tag>, map!(preceded!(char!('|'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: OR_FILT,
inner: tagv,
})
}
));
named!(not<Tag>, map!(preceded!(char!('!'), filter),
|tag: Tag| -> Tag {
Tag::ExplicitTag(ExplicitTag {
class: TagClass::Context,
id: NOT_FILT,
inner: Box::new(tag),
})
}
));
named!(item<Tag>, alt!(eq | non_eq | extensible));
enum Unescaper {
WantFirst,
WantSecond(u8),
Value(u8),
Error,
}
impl Unescaper {
fn feed(&self, c: u8) -> Unescaper {
match *self {
Unescaper::Error => Unescaper::Error,
Unescaper::WantFirst => {
if is_hex_digit(c) {
Unescaper::WantSecond(c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 })
} else {
Unescaper::Error
}
},
Unescaper::WantSecond(partial) => {
if is_hex_digit(c) {
Unescaper::Value((partial << 4) + (c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 }))
} else {
Unescaper::Error
}
},
Unescaper::Value(_v) => if c!= b'\\' { Unescaper::Value(c) } else { Unescaper::WantFirst },
}
}
}
// Any byte in the assertion value may be represented by \NN, where N is a hex digit.
// Some characters must be represented in this way: parentheses, asterisk and backslash
// itself.
named!(unescaped<Vec<u8>>, map_res!(fold_many0!(
verify!(be_u8, is_value_char),
(Unescaper::Value(0), Vec::new()),
|(mut u, mut vec): (Unescaper, Vec<_>), c: u8| {
u = u.feed(c);
if let Unescaper::Value(c) = u {
vec.push(c);
}
(u, vec)
}), |(u, vec): (Unescaper, Vec<_>)| -> Result<Vec<u8>, ()> {
if let Unescaper::Value(_) = u {
Ok(vec)
} else {
Err(())
}
}
));
named!(non_eq<Tag>, do_parse!(
attr: attributedescription >>
filterop: alt!(tag!(">=") | tag!("<=") | tag!("~=")) >>
value: unescaped >> ({
Tag::Sequence(Sequence {
class: TagClass::Context,
id: filtertag(filterop),
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: value,
.. Default::default()
})
]
})
})
));
fn | (filterop: &[u8]) -> u64 {
match filterop {
b">=" => GTE_MATCH,
b"<=" => LTE_MATCH,
b"~=" => APPROX_MATCH,
_ => unimplemented!(),
}
}
named!(eq<Tag>, do_parse!(
attr: attributedescription >>
tag!("=") >>
initial: unescaped >>
mid_final: map_res!(many0!(preceded!(tag!("*"), unescaped)), |v: Vec<Vec<u8>>| -> Result<Vec<Vec<u8>>, ()> {
// an empty element may exist only at the very end; otherwise, we have two adjacent asterisks
if v.iter().enumerate().fold(false, |acc, (n, ve)| acc || ve.is_empty() && n + 1!= v.len()) {
Err(())
} else {
Ok(v)
}
}) >> ({
if mid_final.is_empty() {
// simple equality, no asterisks in assertion value
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EQ_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: initial,
.. Default::default()
})
]
})
} else if initial.is_empty() && mid_final.len() == 1 && mid_final[0].is_empty() {
// presence, single asterisk in assertion value
Tag::OctetString(OctetString {
class: TagClass::Context,
id: PRES_MATCH,
inner: attr.to_vec(),
})
} else {
// substring match
let mut inner = vec![];
if!initial.is_empty() {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: SUB_INITIAL,
inner: initial,
}));
}
let n = mid_final.len();
for (i, sub_elem) in mid_final.into_iter().enumerate() {
if sub_elem.is_empty() {
break;
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: if i + 1!= n { SUB_ANY } else { SUB_FINAL },
inner: sub_elem,
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: SUBSTR_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::Sequence(Sequence {
inner: inner,
.. Default::default()
})
]
})
}
})
));
fn is_value_char(c: u8) -> bool {
c!= 0 && c!= b'(' && c!= b')' && c!= b'*'
}
named!(extensible<Tag>, alt!(attr_dn_mrule | dn_mrule));
named!(attr_dn_mrule<Tag>, do_parse!(
attr: attributedescription >>
dn: opt!(tag!(":dn")) >>
mrule: opt!(preceded!(char!(':'), attributetype)) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(mrule, Some(attr), value, dn.is_some()))
));
named!(dn_mrule<Tag>, do_parse!(
dn: opt!(tag!(":dn")) >>
mrule: preceded!(char!(':'), attributetype) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(Some(mrule), None, value, dn.is_some()))
));
fn extensible_tag(mrule: Option<&[u8]>, attr: Option<&[u8]>, value: Vec<u8>, dn: bool) -> Tag {
let mut inner = vec![];
if let Some(mrule) = mrule {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 1,
inner: mrule.to_vec()
}));
}
if let Some(attr) = attr {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 2,
inner: attr.to_vec()
}));
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 3,
inner: value
}));
if dn {
inner.push(Tag::Boolean(Boolean {
class: TagClass::Context,
id: 4,
inner: dn
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EXT_MATCH,
inner: inner
})
}
named!(attributedescription<&[u8]>, recognize!(do_parse!(
_type: attributetype >>
_opts: many0!(preceded!(char!(';'), take_while1!(is_alnum_hyphen))) >> ()
)));
named!(attributetype<&[u8]>, alt!(numericoid | descr));
named!(numericoid<&[u8]>, recognize!(
do_parse!(
_leading: number >>
_rest: many0!(preceded!(char!('.'), number)) >> ()
)
));
// A number may be zero, but must not have superfluous leading zeroes
named!(number<&[u8]>, verify!(digit, |d: &[u8]| d.len() == 1 || d[0]!= b'0'));
named!(descr<&[u8]>, recognize!(
do_parse!(
_leading: verify!(be_u8, |a: u8| is_alphabetic(a)) >>
_rest: take_while!(is_alnum_hyphen) >> ()
)
));
fn is_alnum_hyphen(c: u8) -> bool {
is_alphanumeric(c) || c == b'-'
}
| filtertag | identifier_name |
filter.rs | use std::default::Default;
use std::str;
use nom::IResult;
use nom::{be_u8, digit, is_alphabetic, is_alphanumeric, is_hex_digit};
use lber::common::TagClass;
use lber::structures::{Boolean, ExplicitTag, OctetString, Sequence, Tag};
#[doc(hidden)]
pub fn parse(input: &str) -> Result<Tag, ()> {
match filtexpr(input.as_bytes()) {
IResult::Done(r, t) => {
if r.is_empty() {
Ok(t)
} else {
Err(())
}
},
IResult::Error(_) | IResult::Incomplete(_) => Err(()),
}
}
const AND_FILT: u64 = 0;
const OR_FILT: u64 = 1;
const NOT_FILT: u64 = 2;
const EQ_MATCH: u64 = 3;
const SUBSTR_MATCH: u64 = 4;
const GTE_MATCH: u64 = 5;
const LTE_MATCH: u64 = 6;
const PRES_MATCH: u64 = 7;
const APPROX_MATCH: u64 = 8;
const EXT_MATCH: u64 = 9;
const SUB_INITIAL: u64 = 0;
const SUB_ANY: u64 = 1;
const SUB_FINAL: u64 = 2;
named!(filtexpr<Tag>, alt!(filter | item));
named!(filter<Tag>, delimited!(char!('('), filtercomp, char!(')')));
named!(filtercomp<Tag>, alt!(and | or | not | item));
named!(filterlist<Vec<Tag>>, many0!(filter));
named!(and<Tag>, map!(preceded!(char!('&'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: AND_FILT,
inner: tagv,
})
}
));
named!(or<Tag>, map!(preceded!(char!('|'), filterlist),
|tagv: Vec<Tag>| -> Tag {
Tag::Sequence(Sequence {
class: TagClass::Context,
id: OR_FILT,
inner: tagv,
})
}
));
named!(not<Tag>, map!(preceded!(char!('!'), filter),
|tag: Tag| -> Tag {
Tag::ExplicitTag(ExplicitTag {
class: TagClass::Context,
id: NOT_FILT,
inner: Box::new(tag),
})
}
));
named!(item<Tag>, alt!(eq | non_eq | extensible));
enum Unescaper {
WantFirst,
WantSecond(u8),
Value(u8),
Error,
}
impl Unescaper {
fn feed(&self, c: u8) -> Unescaper {
match *self {
Unescaper::Error => Unescaper::Error,
Unescaper::WantFirst => { | Unescaper::Error
}
},
Unescaper::WantSecond(partial) => {
if is_hex_digit(c) {
Unescaper::Value((partial << 4) + (c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 }))
} else {
Unescaper::Error
}
},
Unescaper::Value(_v) => if c!= b'\\' { Unescaper::Value(c) } else { Unescaper::WantFirst },
}
}
}
// Any byte in the assertion value may be represented by \NN, where N is a hex digit.
// Some characters must be represented in this way: parentheses, asterisk and backslash
// itself.
named!(unescaped<Vec<u8>>, map_res!(fold_many0!(
verify!(be_u8, is_value_char),
(Unescaper::Value(0), Vec::new()),
|(mut u, mut vec): (Unescaper, Vec<_>), c: u8| {
u = u.feed(c);
if let Unescaper::Value(c) = u {
vec.push(c);
}
(u, vec)
}), |(u, vec): (Unescaper, Vec<_>)| -> Result<Vec<u8>, ()> {
if let Unescaper::Value(_) = u {
Ok(vec)
} else {
Err(())
}
}
));
named!(non_eq<Tag>, do_parse!(
attr: attributedescription >>
filterop: alt!(tag!(">=") | tag!("<=") | tag!("~=")) >>
value: unescaped >> ({
Tag::Sequence(Sequence {
class: TagClass::Context,
id: filtertag(filterop),
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: value,
.. Default::default()
})
]
})
})
));
fn filtertag(filterop: &[u8]) -> u64 {
match filterop {
b">=" => GTE_MATCH,
b"<=" => LTE_MATCH,
b"~=" => APPROX_MATCH,
_ => unimplemented!(),
}
}
named!(eq<Tag>, do_parse!(
attr: attributedescription >>
tag!("=") >>
initial: unescaped >>
mid_final: map_res!(many0!(preceded!(tag!("*"), unescaped)), |v: Vec<Vec<u8>>| -> Result<Vec<Vec<u8>>, ()> {
// an empty element may exist only at the very end; otherwise, we have two adjacent asterisks
if v.iter().enumerate().fold(false, |acc, (n, ve)| acc || ve.is_empty() && n + 1!= v.len()) {
Err(())
} else {
Ok(v)
}
}) >> ({
if mid_final.is_empty() {
// simple equality, no asterisks in assertion value
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EQ_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::OctetString(OctetString {
inner: initial,
.. Default::default()
})
]
})
} else if initial.is_empty() && mid_final.len() == 1 && mid_final[0].is_empty() {
// presence, single asterisk in assertion value
Tag::OctetString(OctetString {
class: TagClass::Context,
id: PRES_MATCH,
inner: attr.to_vec(),
})
} else {
// substring match
let mut inner = vec![];
if!initial.is_empty() {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: SUB_INITIAL,
inner: initial,
}));
}
let n = mid_final.len();
for (i, sub_elem) in mid_final.into_iter().enumerate() {
if sub_elem.is_empty() {
break;
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: if i + 1!= n { SUB_ANY } else { SUB_FINAL },
inner: sub_elem,
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: SUBSTR_MATCH,
inner: vec![
Tag::OctetString(OctetString {
inner: attr.to_vec(),
.. Default::default()
}),
Tag::Sequence(Sequence {
inner: inner,
.. Default::default()
})
]
})
}
})
));
fn is_value_char(c: u8) -> bool {
c!= 0 && c!= b'(' && c!= b')' && c!= b'*'
}
named!(extensible<Tag>, alt!(attr_dn_mrule | dn_mrule));
named!(attr_dn_mrule<Tag>, do_parse!(
attr: attributedescription >>
dn: opt!(tag!(":dn")) >>
mrule: opt!(preceded!(char!(':'), attributetype)) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(mrule, Some(attr), value, dn.is_some()))
));
named!(dn_mrule<Tag>, do_parse!(
dn: opt!(tag!(":dn")) >>
mrule: preceded!(char!(':'), attributetype) >>
tag!(":=") >>
value: unescaped >>
(extensible_tag(Some(mrule), None, value, dn.is_some()))
));
fn extensible_tag(mrule: Option<&[u8]>, attr: Option<&[u8]>, value: Vec<u8>, dn: bool) -> Tag {
let mut inner = vec![];
if let Some(mrule) = mrule {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 1,
inner: mrule.to_vec()
}));
}
if let Some(attr) = attr {
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 2,
inner: attr.to_vec()
}));
}
inner.push(Tag::OctetString(OctetString {
class: TagClass::Context,
id: 3,
inner: value
}));
if dn {
inner.push(Tag::Boolean(Boolean {
class: TagClass::Context,
id: 4,
inner: dn
}));
}
Tag::Sequence(Sequence {
class: TagClass::Context,
id: EXT_MATCH,
inner: inner
})
}
named!(attributedescription<&[u8]>, recognize!(do_parse!(
_type: attributetype >>
_opts: many0!(preceded!(char!(';'), take_while1!(is_alnum_hyphen))) >> ()
)));
named!(attributetype<&[u8]>, alt!(numericoid | descr));
named!(numericoid<&[u8]>, recognize!(
do_parse!(
_leading: number >>
_rest: many0!(preceded!(char!('.'), number)) >> ()
)
));
// A number may be zero, but must not have superfluous leading zeroes
named!(number<&[u8]>, verify!(digit, |d: &[u8]| d.len() == 1 || d[0]!= b'0'));
named!(descr<&[u8]>, recognize!(
do_parse!(
_leading: verify!(be_u8, |a: u8| is_alphabetic(a)) >>
_rest: take_while!(is_alnum_hyphen) >> ()
)
));
fn is_alnum_hyphen(c: u8) -> bool {
is_alphanumeric(c) || c == b'-'
} | if is_hex_digit(c) {
Unescaper::WantSecond(c - if c <= b'9' { b'0' } else { (c & 0x20) + b'A' - 10 })
} else { | random_line_split |
unary-op-disambig.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Preserve semicolons that disambiguate unops
fn f() { }
fn | () -> isize { { f() }; -1 }
fn block_nosemi() -> isize { ({ 0 }) - 1 }
fn if_semi() -> isize { if true { f() } else { f() }; -1 }
fn if_nosemi() -> isize { (if true { 0 } else { 0 }) - 1 }
fn alt_semi() -> isize { match true { true => { f() } _ => { } }; -1 }
fn alt_no_semi() -> isize { (match true { true => { 0 } _ => { 1 } }) - 1 }
fn stmt() { { f() }; -1; }
| block_semi | identifier_name |
unary-op-disambig.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Preserve semicolons that disambiguate unops
fn f() { }
fn block_semi() -> isize { { f() }; -1 }
fn block_nosemi() -> isize { ({ 0 }) - 1 }
fn if_semi() -> isize { if true { f() } else { f() }; -1 }
| fn alt_semi() -> isize { match true { true => { f() } _ => { } }; -1 }
fn alt_no_semi() -> isize { (match true { true => { 0 } _ => { 1 } }) - 1 }
fn stmt() { { f() }; -1; } | fn if_nosemi() -> isize { (if true { 0 } else { 0 }) - 1 }
| random_line_split |
unary-op-disambig.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Preserve semicolons that disambiguate unops
fn f() { }
fn block_semi() -> isize |
fn block_nosemi() -> isize { ({ 0 }) - 1 }
fn if_semi() -> isize { if true { f() } else { f() }; -1 }
fn if_nosemi() -> isize { (if true { 0 } else { 0 }) - 1 }
fn alt_semi() -> isize { match true { true => { f() } _ => { } }; -1 }
fn alt_no_semi() -> isize { (match true { true => { 0 } _ => { 1 } }) - 1 }
fn stmt() { { f() }; -1; }
| { { f() }; -1 } | identifier_body |
lib.rs | };
use msg::constellation_msg::{PipelineId, PipelineNamespaceId, TraversalDirection};
use net_traits::{ReferrerPolicy, ResourceThreads};
use net_traits::image::base::Image;
use net_traits::image_cache::ImageCache;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageType;
use profile_traits::mem;
use profile_traits::time as profile_time;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use style_traits::{CSSPixel, UnsafeNode};
use webdriver_msg::{LoadStatus, WebDriverScriptCommand};
use webvr_traits::{WebVRDisplayEvent, WebVRMsg};
pub use script_msg::{LayoutMsg, ScriptMsg, EventResult, LogEntry};
pub use script_msg::{ServiceWorkerMsg, ScopeThings, SWManagerMsg, SWManagerSenders, DOMMessage};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct UntrustedNodeAddress(pub *const c_void);
impl HeapSizeOf for UntrustedNodeAddress {
fn heap_size_of_children(&self) -> usize {
0
}
}
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
impl Serialize for UntrustedNodeAddress {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
(self.0 as usize).serialize(s)
}
}
impl Deserialize for UntrustedNodeAddress {
fn deserialize<D: Deserializer>(d: D) -> Result<UntrustedNodeAddress, D::Error> {
let value: usize = try!(Deserialize::deserialize(d));
Ok(UntrustedNodeAddress::from_id(value))
}
}
impl UntrustedNodeAddress {
/// Creates an `UntrustedNodeAddress` from the given pointer address value.
#[inline]
pub fn from_id(id: usize) -> UntrustedNodeAddress {
UntrustedNodeAddress(id as *const c_void)
}
}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout thread exit.
ExitNow,
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Tells layout about the new scrolling offsets of each scrollable stacking context.
SetStackingContextScrollStates(Vec<StackingContextScrollState>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// can be passed to `LoadUrl` to load a page with GET/POST
/// parameters or headers
#[derive(Clone, Deserialize, Serialize)]
pub struct LoadData {
/// The URL.
pub url: ServoUrl,
/// The method.
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub method: Method,
/// The headers.
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub headers: Headers,
/// The data.
pub data: Option<Vec<u8>>,
/// The referrer policy.
pub referrer_policy: Option<ReferrerPolicy>,
/// The referrer URL.
pub referrer_url: Option<ServoUrl>,
}
impl LoadData {
/// Create a new `LoadData` object.
pub fn new(url: ServoUrl, referrer_policy: Option<ReferrerPolicy>, referrer_url: Option<ServoUrl>) -> LoadData {
LoadData {
url: url,
method: Method::Get,
headers: Headers::new(),
data: None,
referrer_policy: referrer_policy,
referrer_url: referrer_url,
}
}
}
/// The initial data required to create a new layout attached to an existing script thread.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
/// The ID of the parent pipeline and frame type, if any.
/// If `None`, this is a root pipeline.
pub parent_info: Option<(PipelineId, FrameType)>,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the frame associated with this pipeline.
pub frame_id: FrameId,
/// Network request data which will be initiated by the script thread.
pub load_data: LoadData,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can tell the content process to shut down when it's done.
pub content_process_shutdown_chan: Option<IpcSender<()>>,
/// Number of threads to use for layout.
pub layout_threads: usize,
}
/// When a pipeline is closed, should its browsing context be discarded too?
#[derive(Copy, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
pub enum DiscardBrowsingContext {
/// Discard the browsing context
Yes,
/// Don't discard the browsing context
No,
}
/// Is a document fully active, active or inactive?
/// A document is active if it is the current active document in its session history,
/// it is fuly active if it is active and all of its ancestors are active,
/// and it is inactive otherwise.
/// https://html.spec.whatwg.org/multipage/#active-document
/// https://html.spec.whatwg.org/multipage/#fully-active
#[derive(Copy, Clone, PartialEq, Eq, Hash, HeapSizeOf, Debug, Deserialize, Serialize)]
pub enum DocumentActivity {
/// An inactive document
Inactive,
/// An active but not fully active document
Active,
/// A fully active document
FullyActive,
}
/// Messages sent from the constellation or layout to the script thread.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events. | ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId, DiscardBrowsingContext),
/// Notifies the script that the whole thread should be closed.
ExitScriptThread,
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Notifies script of a new set of scroll offsets.
SetScrollState(PipelineId, Vec<(UntrustedNodeAddress, Point2D<f32>)>),
/// Requests that the script thread immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script thread of a change to one of its document's activity
SetDocumentActivity(PipelineId, DocumentActivity),
/// Notifies script thread whether frame is visible
ChangeFrameVisibilityStatus(PipelineId, bool),
/// Notifies script thread that frame visibility change is complete
/// PipelineId is for the parent, FrameId is for the actual frame.
NotifyVisibilityChange(PipelineId, FrameId, bool),
/// Notifies script thread that a url should be loaded in this iframe.
/// PipelineId is for the parent, FrameId is for the actual frame.
Navigate(PipelineId, FrameId, LoadData, bool),
/// Post a message to a given window.
PostMessage(PipelineId, Option<ImmutableOrigin>, Vec<u8>),
/// Requests the script thread forward a mozbrowser event to an iframe it owns,
/// or to the window if no child frame id is provided.
MozBrowserEvent(PipelineId, Option<FrameId>, MozBrowserEvent),
/// Updates the current pipeline ID of a given iframe.
/// First PipelineId is for the parent, second is the new PipelineId for the frame.
UpdatePipelineId(PipelineId, FrameId, PipelineId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
/// PipelineId is for the parent, FrameId is for the actual frame.
FocusIFrame(PipelineId, FrameId),
/// Passes a webdriver command to the script thread for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script thread that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script thread of a transition end
TransitionEnd(UnsafeNode, String, f64),
/// Notifies the script thread that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Cause a `load` event to be dispatched at the appropriate frame element.
DispatchFrameLoadEvent {
/// The frame that has been marked as loaded.
target: FrameId,
/// The pipeline that contains a frame loading the target pipeline.
parent: PipelineId,
/// The pipeline that has completed loading.
child: PipelineId,
},
/// Cause a `storage` event to be dispatched at the appropriate window.
/// The strings are key, old value and new value.
DispatchStorageEvent(PipelineId, StorageType, ServoUrl, Option<String>, Option<String>, Option<String>),
/// Notifies a parent pipeline that one of its child frames is now active.
/// PipelineId is for the parent, FrameId is the child frame.
FramedContentChanged(PipelineId, FrameId),
/// Report an error from a CSS parser for the given pipeline
ReportCSSError(PipelineId, String, usize, usize, String),
/// Reload the given page.
Reload(PipelineId),
/// Notifies the script thread of a WebVR device event
WebVREvent(PipelineId, WebVREventMsg)
}
impl fmt::Debug for ConstellationControlMsg {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
use self::ConstellationControlMsg::*;
let variant = match *self {
AttachLayout(..) => "AttachLayout",
Resize(..) => "Resize",
ResizeInactive(..) => "ResizeInactive",
ExitPipeline(..) => "ExitPipeline",
ExitScriptThread => "ExitScriptThread",
SendEvent(..) => "SendEvent",
Viewport(..) => "Viewport",
SetScrollState(..) => "SetScrollState",
GetTitle(..) => "GetTitle",
SetDocumentActivity(..) => "SetDocumentActivity",
ChangeFrameVisibilityStatus(..) => "ChangeFrameVisibilityStatus",
NotifyVisibilityChange(..) => "NotifyVisibilityChange",
Navigate(..) => "Navigate",
PostMessage(..) => "PostMessage",
MozBrowserEvent(..) => "MozBrowserEvent",
UpdatePipelineId(..) => "UpdatePipelineId",
FocusIFrame(..) => "FocusIFrame",
WebDriverScriptCommand(..) => "WebDriverScriptCommand",
TickAllAnimations(..) => "TickAllAnimations",
TransitionEnd(..) => "TransitionEnd",
WebFontLoaded(..) => "WebFontLoaded",
DispatchFrameLoadEvent {.. } => "DispatchFrameLoadEvent",
DispatchStorageEvent(..) => "DispatchStorageEvent",
FramedContentChanged(..) => "FramedContentChanged",
ReportCSSError(..) => "ReportCSSError",
Reload(..) => "Reload",
WebVREvent(..) => "WebVREvent",
};
write!(formatter, "ConstellationMsg::{}", variant)
}
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
/// The document has been loaded and is idle.
Idle,
/// The document is either loading or waiting on an event.
Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
/// Animations are active but no callbacks are queued
AnimationsPresent,
/// Animations are active and callbacks are queued
AnimationCallbacksPresent,
/// No animations are active and no callbacks are queued
NoAnimationsPresent,
/// No animations are active but callbacks are queued
NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
/// A new touch point came in contact with the screen.
Down,
/// An existing touch point changed location.
Move,
/// A touch point was removed from the screen.
Up,
/// The system stopped tracking a touch point.
Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
/// The left mouse button.
Left,
/// The middle mouse button.
Middle,
/// The right mouse button.
Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum MouseEventType {
/// Mouse button clicked
Click,
/// Mouse button down
MouseDown,
/// Mouse button up
MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
/// The window was resized.
ResizeEvent(WindowSizeData, WindowSizeType),
/// A mouse button state changed.
MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
/// The mouse was moved over a point (or was moved out of the recognizable region).
MouseMoveEvent(Option<Point2D<f32>>),
/// A touch event was generated with a touch ID and location.
TouchEvent(TouchEventType, TouchId, Point2D<f32>),
/// Touchpad pressure event
TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
/// A key was pressed.
KeyEvent(Option<char>, Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for `TouchpadPressureEvent`.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
/// Pressure before a regular click.
BeforeClick,
/// Pressure after a regular click.
AfterFirstClick,
/// Pressure after a "forceTouch" click
AfterSecondClick,
}
/// Requests a TimerEvent-Message be sent after the given duration.
#[derive(Deserialize, Serialize)]
pub struct TimerEventRequest(pub IpcSender<TimerEvent>, pub TimerSource, pub TimerEventId, pub MsDuration);
/// Type of messages that can be sent to the timer scheduler.
#[derive(Deserialize, Serialize)]
pub enum TimerSchedulerMsg {
/// Message to schedule a new timer event.
Request(TimerEventRequest),
/// Message to exit the timer scheduler.
Exit,
}
/// Notifies the script thread to fire due timers.
/// `TimerSource` must be `FromWindow` when dispatched to `ScriptThread` and
/// must be `FromWorker` when dispatched to a `DedicatedGlobalWorkerScope`
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
/// The event was requested from a window (ScriptThread).
FromWindow(PipelineId),
/// The event was requested from a worker (DedicatedGlobalWorkerScope).
FromWorker,
}
/// The id to be used for a `TimerEvent` is defined by the corresponding `TimerEventRequest`.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<u64, Milliseconds>;
/// Amount of nanoseconds.
pub type NsDuration = Length<u64, Nanoseconds>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
Length::new(time::precise_time_ns() / (1000 * 1000))
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
Length::new(time::precise_time_ns())
}
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
/// The ID of the pipeline with which this script thread is associated.
pub id: PipelineId,
/// The subpage ID of this pipeline to create in its pipeline parent.
/// If `None`, this is the root.
pub parent_info: Option<(PipelineId, FrameType)>,
/// The ID of the frame this script is part of.
pub frame_id: FrameId,
/// The ID of the top-level frame this script is part of.
pub top_level_frame_id: FrameId,
/// A channel with which messages can be sent to us (the script thread).
pub control_chan: IpcSender<ConstellationControlMsg>,
/// A port on which messages sent by the constellation to script can be received.
pub control_port: IpcReceiver<ConstellationControlMsg>,
/// A channel on which messages can be sent to the constellation from script.
pub constellation_chan: IpcSender<ScriptMsg>,
/// A sender for the layout thread to communicate to the constellation.
pub layout_to_constellation_chan: IpcSender<LayoutMsg>,
/// A channel to schedule timer events.
pub scheduler_chan: IpcSender<TimerSchedulerMsg>,
/// A channel to the resource manager thread.
pub resource_threads: ResourceThreads,
/// A channel to the bluetooth thread.
pub bluetooth_thread: IpcSender<BluetoothRequest>,
/// The image cache for this script thread.
pub image_cache: Arc<ImageCache>,
/// A channel to the time profiler thread.
pub time_profiler_chan: profile_traits::time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// The ID of the pipeline namespace for this script thread.
pub pipeline_namespace_id: PipelineNamespaceId,
/// A ping will be sent on this channel once the script thread shuts down.
pub content_process_shutdown_chan: IpcSender<()>,
/// A channel to the webvr thread, if available.
pub webvr_thread: Option<IpcSender<WebVRMsg>>
}
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
/// Type of message sent from script to layout.
type Message;
/// Create a `ScriptThread`.
fn create(state: InitialScriptState, load_data: LoadData)
-> (Sender<Self::Message>, Receiver<Self::Message>);
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
/// Sandbox attribute is present
IFrameSandboxed,
/// Sandbox attribute is not present
IFrameUnsandboxed,
}
/// Specifies the information required to load an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
/// Pipeline ID of the parent of this iframe
pub parent_pipeline_id: PipelineId,
/// The ID for this iframe.
pub frame_id: FrameId,
/// The new pipeline ID that the iframe has generated.
pub new_pipeline_id: PipelineId,
/// Whether this iframe should be considered private
pub is_private: bool,
/// Whether this iframe is a mozbrowser iframe
pub frame_type: FrameType,
/// Wether this load should replace the current entry (reload). If true, the current
/// entry will be replaced instead of a new entry being added.
pub replace: bool,
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfoWithData {
/// The information required to load an iframe.
pub info: IFrameLoadInfo,
/// Load data containing the url to load
pub load_data: Option<LoadData>,
/// The old pipeline ID for this iframe, if a page was previously loaded.
pub old_pipeline_id: Option<PipelineId>,
/// Sandbox type of this iframe
pub sandbox: IFrameSandboxState,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
/// Sent when the scroll position within a browser `<iframe>` changes.
AsyncScroll,
/// Sent when window.close() is called within a browser `<iframe>`.
Close,
/// Sent when a browser `<iframe>` tries to open a context menu. This allows
/// handling `<menuitem>` element available within the browser `<iframe>`'s content.
ContextMenu,
/// Sent when an error occurred while trying to load content within a browser `<iframe>`.
/// Includes a human-readable description, and a machine-readable report.
Error(MozBrowserErrorType, String, String),
/// Sent when the favicon of a browser `<iframe>` changes.
IconChange(String, String, String),
/// Sent when the browser `<iframe>` has reached the server.
Connected,
/// Sent when the browser `<iframe>` has finished loading all its assets.
LoadEnd,
/// Sent when the browser `<iframe>` starts to load a new page.
LoadStart,
/// Sent when a browser `<iframe>`'s location changes.
LocationChange(String, bool, bool),
/// Sent when a new tab is opened within a browser `<iframe>` as a result of the user
/// issuing a command to open a link target in a new tab (for example ctrl/cmd + click.)
/// Includes the URL.
OpenTab(String),
/// Sent when a new window is opened within a browser `<iframe>`.
/// Includes the URL, target browsing context name, and features.
OpenWindow(String, Option<String>, Option<String>),
/// Sent when the SSL state changes within a browser `<iframe>`.
SecurityChange(HttpsState),
/// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
/// Sent when the document.title changes within a browser `<iframe>`.
TitleChange(String),
/// Sent when an HTTP authentification is requested.
UsernameAndPasswordRequired,
/// Sent when a link to a search engine is found.
OpenSearch,
/// Sent when visibility state changes.
VisibilityChange(bool),
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error(_, _, _) => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenTab(_) => "mozbrowseropentab",
MozBrowserEvent::OpenWindow(_, _, _) => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch",
MozBrowserEvent::VisibilityChange(_) => "mozbrowservisibilitychange",
}
}
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
/// The different types of Browser error events
#[derive(Deserialize, Serialize)]
pub enum MozBrowserErrorType {
// For the moment, we are just reporting panics, using the "fatal" type.
/// A fatal error
Fatal,
}
impl MozBrowserErrorType {
/// Get the name of the error type as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserErrorType::Fatal => "fatal",
}
}
}
/// Specifies whether the script or layout thread needs to be ticked for animation.
#[derive(Deserialize, Serialize)]
pub enum AnimationTickType {
/// The script thread.
Script,
/// The layout thread.
Layout,
}
/// The scroll state of a stacking context.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct StackingContextScrollState {
/// The ID of the scroll root.
pub scroll_root_id: ScrollRootId,
/// The scrolling offset of this stacking context.
pub scroll_offset: Point2D<f32>,
}
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
#[derive(Copy, Clone, Debug)]
pub enum DevicePixel {}
/// Data about the window size.
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct WindowSizeData {
/// The size of the initial layout viewport, before parsing an
/// http://www.w3.org/TR/css-device-adapt/#initial-viewport
pub initial_viewport: TypedSize2D<f32, CSSPixel>,
/// The resolution of the window in dppx, not including any "pinch zoom" factor.
pub device_pixel_ratio: ScaleFactor<f32, CSSPixel, DevicePixel>,
}
/// The type of window size change.
#[derive(Deserialize, Eq, PartialEq, Serialize, Copy, Clone, HeapSizeOf)]
pub enum WindowSizeType {
/// Initial load.
Initial,
/// Window resize.
Resize,
}
/// Messages to the constellation originating from the WebDriver server.
#[derive(Deserialize, Serialize)]
pub enum WebDriverCommandMsg {
/// Get the window size.
GetWindowSize(PipelineId, IpcSender<WindowSizeData>),
/// Load a URL in the pipeline with the given ID.
LoadUrl(PipelineId, LoadData, IpcSender<LoadStatus>),
/// Refresh the pipeline with the given ID.
Refresh(PipelineId, IpcSender<LoadStatus>),
/// Pass a webdriver command to the script thread of the pipeline with the
/// given ID for execution.
ScriptCommand | Resize(PipelineId, WindowSizeData, WindowSizeType),
/// Notifies script that window has been resized but to not take immediate action. | random_line_split |
lib.rs |
use msg::constellation_msg::{PipelineId, PipelineNamespaceId, TraversalDirection};
use net_traits::{ReferrerPolicy, ResourceThreads};
use net_traits::image::base::Image;
use net_traits::image_cache::ImageCache;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageType;
use profile_traits::mem;
use profile_traits::time as profile_time;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use style_traits::{CSSPixel, UnsafeNode};
use webdriver_msg::{LoadStatus, WebDriverScriptCommand};
use webvr_traits::{WebVRDisplayEvent, WebVRMsg};
pub use script_msg::{LayoutMsg, ScriptMsg, EventResult, LogEntry};
pub use script_msg::{ServiceWorkerMsg, ScopeThings, SWManagerMsg, SWManagerSenders, DOMMessage};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct UntrustedNodeAddress(pub *const c_void);
impl HeapSizeOf for UntrustedNodeAddress {
fn heap_size_of_children(&self) -> usize {
0
}
}
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
impl Serialize for UntrustedNodeAddress {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
(self.0 as usize).serialize(s)
}
}
impl Deserialize for UntrustedNodeAddress {
fn deserialize<D: Deserializer>(d: D) -> Result<UntrustedNodeAddress, D::Error> {
let value: usize = try!(Deserialize::deserialize(d));
Ok(UntrustedNodeAddress::from_id(value))
}
}
impl UntrustedNodeAddress {
/// Creates an `UntrustedNodeAddress` from the given pointer address value.
#[inline]
pub fn from_id(id: usize) -> UntrustedNodeAddress {
UntrustedNodeAddress(id as *const c_void)
}
}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
/// Requests that this layout thread exit.
ExitNow,
/// Requests the current epoch (layout counter) from this layout.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks layout to run another step in its animation.
TickAnimations,
/// Tells layout about the new scrolling offsets of each scrollable stacking context.
SetStackingContextScrollStates(Vec<StackingContextScrollState>),
/// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
/// and `false` is returned if all fonts have loaded.
GetWebFontLoadState(IpcSender<bool>),
}
/// can be passed to `LoadUrl` to load a page with GET/POST
/// parameters or headers
#[derive(Clone, Deserialize, Serialize)]
pub struct LoadData {
/// The URL.
pub url: ServoUrl,
/// The method.
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub method: Method,
/// The headers.
#[serde(deserialize_with = "::hyper_serde::deserialize",
serialize_with = "::hyper_serde::serialize")]
pub headers: Headers,
/// The data.
pub data: Option<Vec<u8>>,
/// The referrer policy.
pub referrer_policy: Option<ReferrerPolicy>,
/// The referrer URL.
pub referrer_url: Option<ServoUrl>,
}
impl LoadData {
/// Create a new `LoadData` object.
pub fn new(url: ServoUrl, referrer_policy: Option<ReferrerPolicy>, referrer_url: Option<ServoUrl>) -> LoadData {
LoadData {
url: url,
method: Method::Get,
headers: Headers::new(),
data: None,
referrer_policy: referrer_policy,
referrer_url: referrer_url,
}
}
}
/// The initial data required to create a new layout attached to an existing script thread.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
/// The ID of the parent pipeline and frame type, if any.
/// If `None`, this is a root pipeline.
pub parent_info: Option<(PipelineId, FrameType)>,
/// Id of the newly-created pipeline.
pub new_pipeline_id: PipelineId,
/// Id of the frame associated with this pipeline.
pub frame_id: FrameId,
/// Network request data which will be initiated by the script thread.
pub load_data: LoadData,
/// Information about the initial window size.
pub window_size: Option<WindowSizeData>,
/// A port on which layout can receive messages from the pipeline.
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
/// A shutdown channel so that layout can tell the content process to shut down when it's done.
pub content_process_shutdown_chan: Option<IpcSender<()>>,
/// Number of threads to use for layout.
pub layout_threads: usize,
}
/// When a pipeline is closed, should its browsing context be discarded too?
#[derive(Copy, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
pub enum DiscardBrowsingContext {
/// Discard the browsing context
Yes,
/// Don't discard the browsing context
No,
}
/// Is a document fully active, active or inactive?
/// A document is active if it is the current active document in its session history,
/// it is fuly active if it is active and all of its ancestors are active,
/// and it is inactive otherwise.
/// https://html.spec.whatwg.org/multipage/#active-document
/// https://html.spec.whatwg.org/multipage/#fully-active
#[derive(Copy, Clone, PartialEq, Eq, Hash, HeapSizeOf, Debug, Deserialize, Serialize)]
pub enum DocumentActivity {
/// An inactive document
Inactive,
/// An active but not fully active document
Active,
/// A fully active document
FullyActive,
}
/// Messages sent from the constellation or layout to the script thread.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
/// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
AttachLayout(NewLayoutInfo),
/// Window resized. Sends a DOM event eventually, but first we combine events.
Resize(PipelineId, WindowSizeData, WindowSizeType),
/// Notifies script that window has been resized but to not take immediate action.
ResizeInactive(PipelineId, WindowSizeData),
/// Notifies the script that a pipeline should be closed.
ExitPipeline(PipelineId, DiscardBrowsingContext),
/// Notifies the script that the whole thread should be closed.
ExitScriptThread,
/// Sends a DOM event.
SendEvent(PipelineId, CompositorEvent),
/// Notifies script of the viewport.
Viewport(PipelineId, Rect<f32>),
/// Notifies script of a new set of scroll offsets.
SetScrollState(PipelineId, Vec<(UntrustedNodeAddress, Point2D<f32>)>),
/// Requests that the script thread immediately send the constellation the title of a pipeline.
GetTitle(PipelineId),
/// Notifies script thread of a change to one of its document's activity
SetDocumentActivity(PipelineId, DocumentActivity),
/// Notifies script thread whether frame is visible
ChangeFrameVisibilityStatus(PipelineId, bool),
/// Notifies script thread that frame visibility change is complete
/// PipelineId is for the parent, FrameId is for the actual frame.
NotifyVisibilityChange(PipelineId, FrameId, bool),
/// Notifies script thread that a url should be loaded in this iframe.
/// PipelineId is for the parent, FrameId is for the actual frame.
Navigate(PipelineId, FrameId, LoadData, bool),
/// Post a message to a given window.
PostMessage(PipelineId, Option<ImmutableOrigin>, Vec<u8>),
/// Requests the script thread forward a mozbrowser event to an iframe it owns,
/// or to the window if no child frame id is provided.
MozBrowserEvent(PipelineId, Option<FrameId>, MozBrowserEvent),
/// Updates the current pipeline ID of a given iframe.
/// First PipelineId is for the parent, second is the new PipelineId for the frame.
UpdatePipelineId(PipelineId, FrameId, PipelineId),
/// Set an iframe to be focused. Used when an element in an iframe gains focus.
/// PipelineId is for the parent, FrameId is for the actual frame.
FocusIFrame(PipelineId, FrameId),
/// Passes a webdriver command to the script thread for execution
WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
/// Notifies script thread that all animations are done
TickAllAnimations(PipelineId),
/// Notifies the script thread of a transition end
TransitionEnd(UnsafeNode, String, f64),
/// Notifies the script thread that a new Web font has been loaded, and thus the page should be
/// reflowed.
WebFontLoaded(PipelineId),
/// Cause a `load` event to be dispatched at the appropriate frame element.
DispatchFrameLoadEvent {
/// The frame that has been marked as loaded.
target: FrameId,
/// The pipeline that contains a frame loading the target pipeline.
parent: PipelineId,
/// The pipeline that has completed loading.
child: PipelineId,
},
/// Cause a `storage` event to be dispatched at the appropriate window.
/// The strings are key, old value and new value.
DispatchStorageEvent(PipelineId, StorageType, ServoUrl, Option<String>, Option<String>, Option<String>),
/// Notifies a parent pipeline that one of its child frames is now active.
/// PipelineId is for the parent, FrameId is the child frame.
FramedContentChanged(PipelineId, FrameId),
/// Report an error from a CSS parser for the given pipeline
ReportCSSError(PipelineId, String, usize, usize, String),
/// Reload the given page.
Reload(PipelineId),
/// Notifies the script thread of a WebVR device event
WebVREvent(PipelineId, WebVREventMsg)
}
impl fmt::Debug for ConstellationControlMsg {
    /// Formats as `ConstellationControlMsg::<Variant>`, omitting payloads,
    /// so log output names the actual type being debugged.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        use self::ConstellationControlMsg::*;
        let variant = match *self {
            AttachLayout(..) => "AttachLayout",
            Resize(..) => "Resize",
            ResizeInactive(..) => "ResizeInactive",
            ExitPipeline(..) => "ExitPipeline",
            ExitScriptThread => "ExitScriptThread",
            SendEvent(..) => "SendEvent",
            Viewport(..) => "Viewport",
            SetScrollState(..) => "SetScrollState",
            GetTitle(..) => "GetTitle",
            SetDocumentActivity(..) => "SetDocumentActivity",
            ChangeFrameVisibilityStatus(..) => "ChangeFrameVisibilityStatus",
            NotifyVisibilityChange(..) => "NotifyVisibilityChange",
            Navigate(..) => "Navigate",
            PostMessage(..) => "PostMessage",
            MozBrowserEvent(..) => "MozBrowserEvent",
            UpdatePipelineId(..) => "UpdatePipelineId",
            FocusIFrame(..) => "FocusIFrame",
            WebDriverScriptCommand(..) => "WebDriverScriptCommand",
            TickAllAnimations(..) => "TickAllAnimations",
            TransitionEnd(..) => "TransitionEnd",
            WebFontLoaded(..) => "WebFontLoaded",
            DispatchFrameLoadEvent { .. } => "DispatchFrameLoadEvent",
            DispatchStorageEvent(..) => "DispatchStorageEvent",
            FramedContentChanged(..) => "FramedContentChanged",
            ReportCSSError(..) => "ReportCSSError",
            Reload(..) => "Reload",
            WebVREvent(..) => "WebVREvent",
        };
        // Was "ConstellationMsg::{}", which is the name of a *different*
        // message type and made debug logs misleading.
        write!(formatter, "ConstellationControlMsg::{}", variant)
    }
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
    /// The document has been loaded and is idle.
    Idle,
    /// The document is either loading or waiting on an event.
    Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
    /// Animations are active but no callbacks are queued
    AnimationsPresent,
    /// Animations are active and callbacks are queued
    AnimationCallbacksPresent,
    /// No animations are active and no callbacks are queued
    NoAnimationsPresent,
    /// No animations are active but callbacks are queued
    NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
    /// A new touch point came in contact with the screen.
    Down,
    /// An existing touch point changed location.
    Move,
    /// A touch point was removed from the screen.
    Up,
    /// The system stopped tracking a touch point.
    Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
///
/// The wrapped `i32` is treated as an opaque key; only equality
/// comparisons are derived for it.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
    /// The left mouse button.
    Left,
    /// The middle mouse button.
    Middle,
    /// The right mouse button.
    Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum MouseEventType {
    /// Mouse button clicked
    Click,
    /// Mouse button down
    MouseDown,
    /// Mouse button up
    MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
    /// The window was resized.
    ResizeEvent(WindowSizeData, WindowSizeType),
    /// A mouse button state changed.
    MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
    /// The mouse was moved over a point (or was moved out of the recognizable region).
    MouseMoveEvent(Option<Point2D<f32>>),
    /// A touch event was generated with a touch ID and location.
    TouchEvent(TouchEventType, TouchId, Point2D<f32>),
    /// Touchpad pressure event
    /// (location, pressure reading, and the click phase the pressure maps to).
    TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
    /// A key was pressed or released: the produced character (if any), the
    /// key, its press/release state, and the active modifiers.
    KeyEvent(Option<char>, Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for `TouchpadPressureEvent`.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
    /// Pressure before a regular click.
    BeforeClick,
    /// Pressure after a regular click.
    AfterFirstClick,
    /// Pressure after a "forceTouch" click
    AfterSecondClick,
}
/// Requests a TimerEvent-Message be sent after the given duration.
///
/// Fields, in order: the channel on which to deliver the `TimerEvent`, the
/// requesting thread (`TimerSource`), the id to echo back, and the delay in ms.
#[derive(Deserialize, Serialize)]
pub struct TimerEventRequest(pub IpcSender<TimerEvent>, pub TimerSource, pub TimerEventId, pub MsDuration);
/// Type of messages that can be sent to the timer scheduler.
#[derive(Deserialize, Serialize)]
pub enum TimerSchedulerMsg {
    /// Message to schedule a new timer event.
    Request(TimerEventRequest),
    /// Message to exit the timer scheduler.
    Exit,
}
/// Notifies the script thread to fire due timers.
/// `TimerSource` must be `FromWindow` when dispatched to `ScriptThread` and
/// must be `FromWorker` when dispatched to a `DedicatedGlobalWorkerScope`
///
/// Fields, in order: the requesting thread and the id carried over from the
/// originating `TimerEventRequest`.
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
    /// The event was requested from a window (ScriptThread).
    FromWindow(PipelineId),
    /// The event was requested from a worker (DedicatedGlobalWorkerScope).
    FromWorker,
}
/// The id to be used for a `TimerEvent` is defined by the corresponding `TimerEventRequest`.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
///
/// Uninhabited marker type: used only as the phantom unit parameter of `Length`.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
///
/// Uninhabited marker type: used only as the phantom unit parameter of `Length`.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<u64, Milliseconds>;
/// Amount of nanoseconds.
pub type NsDuration = Length<u64, Nanoseconds>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
    // Convert the nanosecond clock reading down to whole milliseconds.
    const NS_PER_MS: u64 = 1_000_000;
    let ns = time::precise_time_ns();
    Length::new(ns / NS_PER_MS)
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
    // Wrap the raw clock reading in the typed `Length` unit.
    let ns = time::precise_time_ns();
    Length::new(ns)
}
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
    /// The ID of the pipeline with which this script thread is associated.
    pub id: PipelineId,
    /// The subpage ID of this pipeline to create in its pipeline parent.
    /// If `None`, this is the root.
    // NOTE(review): the doc says "subpage ID" but the field holds the parent
    // pipeline id plus frame type — confirm and update the wording.
    pub parent_info: Option<(PipelineId, FrameType)>,
    /// The ID of the frame this script is part of.
    pub frame_id: FrameId,
    /// The ID of the top-level frame this script is part of.
    pub top_level_frame_id: FrameId,
    /// A channel with which messages can be sent to us (the script thread).
    pub control_chan: IpcSender<ConstellationControlMsg>,
    /// A port on which messages sent by the constellation to script can be received.
    pub control_port: IpcReceiver<ConstellationControlMsg>,
    /// A channel on which messages can be sent to the constellation from script.
    pub constellation_chan: IpcSender<ScriptMsg>,
    /// A sender for the layout thread to communicate to the constellation.
    pub layout_to_constellation_chan: IpcSender<LayoutMsg>,
    /// A channel to schedule timer events.
    pub scheduler_chan: IpcSender<TimerSchedulerMsg>,
    /// A channel to the resource manager thread.
    pub resource_threads: ResourceThreads,
    /// A channel to the bluetooth thread.
    pub bluetooth_thread: IpcSender<BluetoothRequest>,
    /// The image cache for this script thread.
    pub image_cache: Arc<ImageCache>,
    /// A channel to the time profiler thread.
    pub time_profiler_chan: profile_traits::time::ProfilerChan,
    /// A channel to the memory profiler thread.
    pub mem_profiler_chan: mem::ProfilerChan,
    /// A channel to the developer tools, if applicable.
    pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
    /// Information about the initial window size.
    pub window_size: Option<WindowSizeData>,
    /// The ID of the pipeline namespace for this script thread.
    pub pipeline_namespace_id: PipelineNamespaceId,
    /// A ping will be sent on this channel once the script thread shuts down.
    pub content_process_shutdown_chan: IpcSender<()>,
    /// A channel to the webvr thread, if available.
    pub webvr_thread: Option<IpcSender<WebVRMsg>>
}
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
    /// Type of message sent from script to layout.
    type Message;
    /// Create a `ScriptThread`.
    ///
    /// Returns the sender/receiver pair over which `Self::Message` values flow.
    fn create(state: InitialScriptState, load_data: LoadData)
        -> (Sender<Self::Message>, Receiver<Self::Message>);
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
    /// Sandbox attribute is present
    IFrameSandboxed,
    /// Sandbox attribute is not present
    IFrameUnsandboxed,
}
/// Specifies the information required to load an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
    /// Pipeline ID of the parent of this iframe
    pub parent_pipeline_id: PipelineId,
    /// The ID for this iframe.
    pub frame_id: FrameId,
    /// The new pipeline ID that the iframe has generated.
    pub new_pipeline_id: PipelineId,
    /// Whether this iframe should be considered private
    pub is_private: bool,
    /// Whether this iframe is a mozbrowser iframe
    pub frame_type: FrameType,
    /// Whether this load should replace the current entry (reload). If true, the current
    /// entry will be replaced instead of a new entry being added.
    pub replace: bool,
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfoWithData {
    /// The information required to load an iframe.
    pub info: IFrameLoadInfo,
    /// Load data containing the url to load
    pub load_data: Option<LoadData>,
    /// The old pipeline ID for this iframe, if a page was previously loaded.
    pub old_pipeline_id: Option<PipelineId>,
    /// Sandbox type of this iframe
    pub sandbox: IFrameSandboxState,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
    /// Sent when the scroll position within a browser `<iframe>` changes.
    AsyncScroll,
    /// Sent when window.close() is called within a browser `<iframe>`.
    Close,
    /// Sent when a browser `<iframe>` tries to open a context menu. This allows
    /// handling `<menuitem>` element available within the browser `<iframe>`'s content.
    ContextMenu,
    /// Sent when an error occurred while trying to load content within a browser `<iframe>`.
    /// Includes a human-readable description, and a machine-readable report.
    Error(MozBrowserErrorType, String, String),
    /// Sent when the favicon of a browser `<iframe>` changes.
    IconChange(String, String, String),
    /// Sent when the browser `<iframe>` has reached the server.
    Connected,
    /// Sent when the browser `<iframe>` has finished loading all its assets.
    LoadEnd,
    /// Sent when the browser `<iframe>` starts to load a new page.
    LoadStart,
    /// Sent when a browser `<iframe>`'s location changes.
    LocationChange(String, bool, bool),
    /// Sent when a new tab is opened within a browser `<iframe>` as a result of the user
    /// issuing a command to open a link target in a new tab (for example ctrl/cmd + click.)
    /// Includes the URL.
    OpenTab(String),
    /// Sent when a new window is opened within a browser `<iframe>`.
    /// Includes the URL, target browsing context name, and features.
    OpenWindow(String, Option<String>, Option<String>),
    /// Sent when the SSL state changes within a browser `<iframe>`.
    SecurityChange(HttpsState),
    /// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
    ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
    /// Sent when the document.title changes within a browser `<iframe>`.
    TitleChange(String),
    /// Sent when an HTTP authentication is requested.
    UsernameAndPasswordRequired,
    /// Sent when a link to a search engine is found.
    OpenSearch,
    /// Sent when visibility state changes.
    VisibilityChange(bool),
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error(_, _, _) => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenTab(_) => "mozbrowseropentab",
MozBrowserEvent::OpenWindow(_, _, _) => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch",
MozBrowserEvent::VisibilityChange(_) => "mozbrowservisibilitychange",
}
}
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
/// The different types of Browser error events
#[derive(Deserialize, Serialize)]
pub enum MozBrowserErrorType {
    // For the moment, we are just reporting panics, using the "fatal" type.
    /// A fatal error (currently used to report panics).
    Fatal,
}
impl MozBrowserErrorType {
/// Get the name of the error type as a `& str`
pub fn name(&self) -> &'static str |
}
/// Specifies whether the script or layout thread needs to be ticked for animation.
#[derive(Deserialize, Serialize)]
pub enum AnimationTickType {
    /// The script thread.
    Script,
    /// The layout thread.
    Layout,
}
/// The scroll state of a stacking context.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct StackingContextScrollState {
    /// The ID of the scroll root.
    pub scroll_root_id: ScrollRootId,
    /// The scrolling offset of this stacking context.
    pub scroll_offset: Point2D<f32>,
}
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
///
/// Uninhabited marker type: used only as a phantom unit parameter.
#[derive(Copy, Clone, Debug)]
pub enum DevicePixel {}
/// Data about the window size.
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct WindowSizeData {
    /// The size of the initial layout viewport, before parsing an
    /// http://www.w3.org/TR/css-device-adapt/#initial-viewport
    pub initial_viewport: TypedSize2D<f32, CSSPixel>,
    /// The resolution of the window in dppx, not including any "pinch zoom" factor.
    pub device_pixel_ratio: ScaleFactor<f32, CSSPixel, DevicePixel>,
}
/// The type of window size change.
#[derive(Deserialize, Eq, PartialEq, Serialize, Copy, Clone, HeapSizeOf)]
pub enum WindowSizeType {
    /// Initial load.
    Initial,
    /// Window resize.
    Resize,
}
/// Messages to the constellation originating from the WebDriver server.
#[derive(Deserialize, Serialize)]
pub enum WebDriverCommandMsg {
/// Get the window size.
GetWindowSize(PipelineId, IpcSender<WindowSizeData>),
/// Load a URL in the pipeline with the given ID.
LoadUrl(PipelineId, LoadData, IpcSender<LoadStatus>),
/// Refresh the pipeline with the given ID.
Refresh(PipelineId, IpcSender<LoadStatus>),
/// Pass a webdriver command to the script thread of the pipeline with the
/// given ID for execution.
| {
match *self {
MozBrowserErrorType::Fatal => "fatal",
}
} | identifier_body |
lib.rs |
use msg::constellation_msg::{PipelineId, PipelineNamespaceId, TraversalDirection};
use net_traits::{ReferrerPolicy, ResourceThreads};
use net_traits::image::base::Image;
use net_traits::image_cache::ImageCache;
use net_traits::response::HttpsState;
use net_traits::storage_thread::StorageType;
use profile_traits::mem;
use profile_traits::time as profile_time;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use servo_url::ImmutableOrigin;
use servo_url::ServoUrl;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender};
use style_traits::{CSSPixel, UnsafeNode};
use webdriver_msg::{LoadStatus, WebDriverScriptCommand};
use webvr_traits::{WebVRDisplayEvent, WebVRMsg};
pub use script_msg::{LayoutMsg, ScriptMsg, EventResult, LogEntry};
pub use script_msg::{ServiceWorkerMsg, ScopeThings, SWManagerMsg, SWManagerSenders, DOMMessage};
/// The address of a node. Layout sends these back. They must be validated via
/// `from_untrusted_node_address` before they can be used, because we do not trust layout.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct UntrustedNodeAddress(pub *const c_void);
impl HeapSizeOf for UntrustedNodeAddress {
    // A raw address owns no heap data.
    fn heap_size_of_children(&self) -> usize {
        0
    }
}
// SAFETY: this is a plain address value; it is never dereferenced without
// first being validated (see the type-level docs), so sending it across
// threads is sound.
#[allow(unsafe_code)]
unsafe impl Send for UntrustedNodeAddress {}
impl Serialize for UntrustedNodeAddress {
    // Serialized as the pointer's integer value (usize).
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        (self.0 as usize).serialize(s)
    }
}
impl Deserialize for UntrustedNodeAddress {
    // Reconstructed from the integer form produced by `Serialize`.
    fn deserialize<D: Deserializer>(d: D) -> Result<UntrustedNodeAddress, D::Error> {
        let value: usize = try!(Deserialize::deserialize(d));
        Ok(UntrustedNodeAddress::from_id(value))
    }
}
impl UntrustedNodeAddress {
    /// Creates an `UntrustedNodeAddress` from the given pointer address value.
    #[inline]
    pub fn from_id(id: usize) -> UntrustedNodeAddress {
        UntrustedNodeAddress(id as *const c_void)
    }
}
/// Messages sent to the layout thread from the constellation and/or compositor.
#[derive(Deserialize, Serialize)]
pub enum LayoutControlMsg {
    /// Requests that this layout thread exit.
    ExitNow,
    /// Requests the current epoch (layout counter) from this layout.
    GetCurrentEpoch(IpcSender<Epoch>),
    /// Asks layout to run another step in its animation.
    TickAnimations,
    /// Tells layout about the new scrolling offsets of each scrollable stacking context.
    SetStackingContextScrollStates(Vec<StackingContextScrollState>),
    /// Requests the current load state of Web fonts. `true` is returned if fonts are still loading
    /// and `false` is returned if all fonts have loaded.
    GetWebFontLoadState(IpcSender<bool>),
}
/// Can be passed to `LoadUrl` to load a page with GET/POST
/// parameters or headers
#[derive(Clone, Deserialize, Serialize)]
pub struct LoadData {
    /// The URL.
    pub url: ServoUrl,
    /// The method.
    // hyper's types lack serde impls, so (de)serialization goes through the
    // `hyper_serde` shims.
    #[serde(deserialize_with = "::hyper_serde::deserialize",
            serialize_with = "::hyper_serde::serialize")]
    pub method: Method,
    /// The headers.
    #[serde(deserialize_with = "::hyper_serde::deserialize",
            serialize_with = "::hyper_serde::serialize")]
    pub headers: Headers,
    /// The data.
    pub data: Option<Vec<u8>>,
    /// The referrer policy.
    pub referrer_policy: Option<ReferrerPolicy>,
    /// The referrer URL.
    pub referrer_url: Option<ServoUrl>,
}
impl LoadData {
    /// Create a new `LoadData` object.
    ///
    /// The request starts out as a plain GET with empty headers and no body;
    /// callers adjust `method`, `headers`, and `data` afterwards as needed.
    pub fn new(url: ServoUrl, referrer_policy: Option<ReferrerPolicy>, referrer_url: Option<ServoUrl>) -> LoadData {
        LoadData {
            method: Method::Get,
            headers: Headers::new(),
            data: None,
            url: url,
            referrer_policy: referrer_policy,
            referrer_url: referrer_url,
        }
    }
}
/// The initial data required to create a new layout attached to an existing script thread.
#[derive(Deserialize, Serialize)]
pub struct NewLayoutInfo {
    /// The ID of the parent pipeline and frame type, if any.
    /// If `None`, this is a root pipeline.
    pub parent_info: Option<(PipelineId, FrameType)>,
    /// Id of the newly-created pipeline.
    pub new_pipeline_id: PipelineId,
    /// Id of the frame associated with this pipeline.
    pub frame_id: FrameId,
    /// Network request data which will be initiated by the script thread.
    pub load_data: LoadData,
    /// Information about the initial window size.
    pub window_size: Option<WindowSizeData>,
    /// A port on which layout can receive messages from the pipeline.
    pub pipeline_port: IpcReceiver<LayoutControlMsg>,
    /// A shutdown channel so that layout can tell the content process to shut down when it's done.
    pub content_process_shutdown_chan: Option<IpcSender<()>>,
    /// Number of threads to use for layout.
    pub layout_threads: usize,
}
/// When a pipeline is closed, should its browsing context be discarded too?
#[derive(Copy, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)]
pub enum DiscardBrowsingContext {
    /// Discard the browsing context
    Yes,
    /// Don't discard the browsing context
    No,
}
/// Is a document fully active, active or inactive?
/// A document is active if it is the current active document in its session history,
/// it is fully active if it is active and all of its ancestors are active,
/// and it is inactive otherwise.
/// https://html.spec.whatwg.org/multipage/#active-document
/// https://html.spec.whatwg.org/multipage/#fully-active
#[derive(Copy, Clone, PartialEq, Eq, Hash, HeapSizeOf, Debug, Deserialize, Serialize)]
pub enum DocumentActivity {
    /// An inactive document
    Inactive,
    /// An active but not fully active document
    Active,
    /// A fully active document
    FullyActive,
}
/// Messages sent from the constellation or layout to the script thread.
///
/// Unless a variant's documentation says otherwise, the leading `PipelineId`
/// identifies the pipeline the message is addressed to.
#[derive(Deserialize, Serialize)]
pub enum ConstellationControlMsg {
    /// Gives a channel and ID to a layout thread, as well as the ID of that layout's parent
    AttachLayout(NewLayoutInfo),
    /// Window resized.  Sends a DOM event eventually, but first we combine events.
    Resize(PipelineId, WindowSizeData, WindowSizeType),
    /// Notifies script that window has been resized but to not take immediate action.
    ResizeInactive(PipelineId, WindowSizeData),
    /// Notifies the script that a pipeline should be closed.
    ExitPipeline(PipelineId, DiscardBrowsingContext),
    /// Notifies the script that the whole thread should be closed.
    ExitScriptThread,
    /// Sends a DOM event.
    SendEvent(PipelineId, CompositorEvent),
    /// Notifies script of the viewport.
    Viewport(PipelineId, Rect<f32>),
    /// Notifies script of a new set of scroll offsets.
    SetScrollState(PipelineId, Vec<(UntrustedNodeAddress, Point2D<f32>)>),
    /// Requests that the script thread immediately send the constellation the title of a pipeline.
    GetTitle(PipelineId),
    /// Notifies script thread of a change to one of its document's activity
    SetDocumentActivity(PipelineId, DocumentActivity),
    /// Notifies script thread whether frame is visible
    ChangeFrameVisibilityStatus(PipelineId, bool),
    /// Notifies script thread that frame visibility change is complete
    /// PipelineId is for the parent, FrameId is for the actual frame.
    NotifyVisibilityChange(PipelineId, FrameId, bool),
    /// Notifies script thread that a url should be loaded in this iframe.
    /// PipelineId is for the parent, FrameId is for the actual frame.
    Navigate(PipelineId, FrameId, LoadData, bool),
    /// Post a message to a given window.
    PostMessage(PipelineId, Option<ImmutableOrigin>, Vec<u8>),
    /// Requests the script thread forward a mozbrowser event to an iframe it owns,
    /// or to the window if no child frame id is provided.
    MozBrowserEvent(PipelineId, Option<FrameId>, MozBrowserEvent),
    /// Updates the current pipeline ID of a given iframe.
    /// First PipelineId is for the parent, second is the new PipelineId for the frame.
    UpdatePipelineId(PipelineId, FrameId, PipelineId),
    /// Set an iframe to be focused. Used when an element in an iframe gains focus.
    /// PipelineId is for the parent, FrameId is for the actual frame.
    FocusIFrame(PipelineId, FrameId),
    /// Passes a webdriver command to the script thread for execution
    WebDriverScriptCommand(PipelineId, WebDriverScriptCommand),
    /// Notifies script thread that all animations are done
    TickAllAnimations(PipelineId),
    /// Notifies the script thread of a transition end
    TransitionEnd(UnsafeNode, String, f64),
    /// Notifies the script thread that a new Web font has been loaded, and thus the page should be
    /// reflowed.
    WebFontLoaded(PipelineId),
    /// Cause a `load` event to be dispatched at the appropriate frame element.
    DispatchFrameLoadEvent {
        /// The frame that has been marked as loaded.
        target: FrameId,
        /// The pipeline that contains a frame loading the target pipeline.
        parent: PipelineId,
        /// The pipeline that has completed loading.
        child: PipelineId,
    },
    /// Cause a `storage` event to be dispatched at the appropriate window.
    /// The strings are key, old value and new value.
    DispatchStorageEvent(PipelineId, StorageType, ServoUrl, Option<String>, Option<String>, Option<String>),
    /// Notifies a parent pipeline that one of its child frames is now active.
    /// PipelineId is for the parent, FrameId is the child frame.
    FramedContentChanged(PipelineId, FrameId),
    /// Report an error from a CSS parser for the given pipeline
    ReportCSSError(PipelineId, String, usize, usize, String),
    /// Reload the given page.
    Reload(PipelineId),
    /// Notifies the script thread of a WebVR device event
    WebVREvent(PipelineId, WebVREventMsg)
}
impl fmt::Debug for ConstellationControlMsg {
    /// Formats as `ConstellationControlMsg::<Variant>`, omitting payloads,
    /// so log output names the actual type being debugged.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        use self::ConstellationControlMsg::*;
        let variant = match *self {
            AttachLayout(..) => "AttachLayout",
            Resize(..) => "Resize",
            ResizeInactive(..) => "ResizeInactive",
            ExitPipeline(..) => "ExitPipeline",
            ExitScriptThread => "ExitScriptThread",
            SendEvent(..) => "SendEvent",
            Viewport(..) => "Viewport",
            SetScrollState(..) => "SetScrollState",
            GetTitle(..) => "GetTitle",
            SetDocumentActivity(..) => "SetDocumentActivity",
            ChangeFrameVisibilityStatus(..) => "ChangeFrameVisibilityStatus",
            NotifyVisibilityChange(..) => "NotifyVisibilityChange",
            Navigate(..) => "Navigate",
            PostMessage(..) => "PostMessage",
            MozBrowserEvent(..) => "MozBrowserEvent",
            UpdatePipelineId(..) => "UpdatePipelineId",
            FocusIFrame(..) => "FocusIFrame",
            WebDriverScriptCommand(..) => "WebDriverScriptCommand",
            TickAllAnimations(..) => "TickAllAnimations",
            TransitionEnd(..) => "TransitionEnd",
            WebFontLoaded(..) => "WebFontLoaded",
            DispatchFrameLoadEvent { .. } => "DispatchFrameLoadEvent",
            DispatchStorageEvent(..) => "DispatchStorageEvent",
            FramedContentChanged(..) => "FramedContentChanged",
            ReportCSSError(..) => "ReportCSSError",
            Reload(..) => "Reload",
            WebVREvent(..) => "WebVREvent",
        };
        // Was "ConstellationMsg::{}", which is the name of a *different*
        // message type and made debug logs misleading.
        write!(formatter, "ConstellationControlMsg::{}", variant)
    }
}
/// Used to determine if a script has any pending asynchronous activity.
#[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DocumentState {
    /// The document has been loaded and is idle.
    Idle,
    /// The document is either loading or waiting on an event.
    Pending,
}
/// For a given pipeline, whether any animations are currently running
/// and any animation callbacks are queued
#[derive(Clone, Eq, PartialEq, Deserialize, Serialize, Debug)]
pub enum AnimationState {
    /// Animations are active but no callbacks are queued
    AnimationsPresent,
    /// Animations are active and callbacks are queued
    AnimationCallbacksPresent,
    /// No animations are active and no callbacks are queued
    NoAnimationsPresent,
    /// No animations are active but callbacks are queued
    NoAnimationCallbacksPresent,
}
/// The type of input represented by a multi-touch event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TouchEventType {
    /// A new touch point came in contact with the screen.
    Down,
    /// An existing touch point changed location.
    Move,
    /// A touch point was removed from the screen.
    Up,
    /// The system stopped tracking a touch point.
    Cancel,
}
/// An opaque identifier for a touch point.
///
/// http://w3c.github.io/touch-events/#widl-Touch-identifier
///
/// The wrapped `i32` is treated as an opaque key; only equality
/// comparisons are derived for it.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
pub struct TouchId(pub i32);
/// The mouse button involved in the event.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum MouseButton {
    /// The left mouse button.
    Left,
    /// The middle mouse button.
    Middle,
    /// The right mouse button.
    Right,
}
/// The types of mouse events
#[derive(Deserialize, HeapSizeOf, Serialize)]
pub enum | {
/// Mouse button clicked
Click,
/// Mouse button down
MouseDown,
/// Mouse button up
MouseUp,
}
/// Events from the compositor that the script thread needs to know about
#[derive(Deserialize, Serialize)]
pub enum CompositorEvent {
    /// The window was resized.
    ResizeEvent(WindowSizeData, WindowSizeType),
    /// A mouse button state changed.
    MouseButtonEvent(MouseEventType, MouseButton, Point2D<f32>),
    /// The mouse was moved over a point (or was moved out of the recognizable region).
    MouseMoveEvent(Option<Point2D<f32>>),
    /// A touch event was generated with a touch ID and location.
    TouchEvent(TouchEventType, TouchId, Point2D<f32>),
    /// Touchpad pressure event
    /// (location, pressure reading, and the click phase the pressure maps to).
    TouchpadPressureEvent(Point2D<f32>, f32, TouchpadPressurePhase),
    /// A key was pressed or released: the produced character (if any), the
    /// key, its press/release state, and the active modifiers.
    KeyEvent(Option<char>, Key, KeyState, KeyModifiers),
}
/// Touchpad pressure phase for `TouchpadPressureEvent`.
#[derive(Copy, Clone, HeapSizeOf, PartialEq, Deserialize, Serialize)]
pub enum TouchpadPressurePhase {
    /// Pressure before a regular click.
    BeforeClick,
    /// Pressure after a regular click.
    AfterFirstClick,
    /// Pressure after a "forceTouch" click
    AfterSecondClick,
}
/// Requests a TimerEvent-Message be sent after the given duration.
///
/// Fields, in order: the channel on which to deliver the `TimerEvent`, the
/// requesting thread (`TimerSource`), the id to echo back, and the delay in ms.
#[derive(Deserialize, Serialize)]
pub struct TimerEventRequest(pub IpcSender<TimerEvent>, pub TimerSource, pub TimerEventId, pub MsDuration);
/// Type of messages that can be sent to the timer scheduler.
#[derive(Deserialize, Serialize)]
pub enum TimerSchedulerMsg {
    /// Message to schedule a new timer event.
    Request(TimerEventRequest),
    /// Message to exit the timer scheduler.
    Exit,
}
/// Notifies the script thread to fire due timers.
/// `TimerSource` must be `FromWindow` when dispatched to `ScriptThread` and
/// must be `FromWorker` when dispatched to a `DedicatedGlobalWorkerScope`
///
/// Fields, in order: the requesting thread and the id carried over from the
/// originating `TimerEventRequest`.
#[derive(Deserialize, Serialize)]
pub struct TimerEvent(pub TimerSource, pub TimerEventId);
/// Describes the thread that requested the TimerEvent.
#[derive(Copy, Clone, HeapSizeOf, Deserialize, Serialize)]
pub enum TimerSource {
    /// The event was requested from a window (ScriptThread).
    FromWindow(PipelineId),
    /// The event was requested from a worker (DedicatedGlobalWorkerScope).
    FromWorker,
}
/// The id to be used for a `TimerEvent` is defined by the corresponding `TimerEventRequest`.
#[derive(PartialEq, Eq, Copy, Clone, Debug, HeapSizeOf, Deserialize, Serialize)]
pub struct TimerEventId(pub u32);
/// Unit of measurement.
///
/// Uninhabited marker type: used only as the phantom unit parameter of `Length`.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Milliseconds {}
/// Unit of measurement.
///
/// Uninhabited marker type: used only as the phantom unit parameter of `Length`.
#[derive(Clone, Copy, HeapSizeOf)]
pub enum Nanoseconds {}
/// Amount of milliseconds.
pub type MsDuration = Length<u64, Milliseconds>;
/// Amount of nanoseconds.
pub type NsDuration = Length<u64, Nanoseconds>;
/// Returns the duration since an unspecified epoch measured in ms.
pub fn precise_time_ms() -> MsDuration {
    // Convert the nanosecond clock reading down to whole milliseconds.
    const NS_PER_MS: u64 = 1_000_000;
    let ns = time::precise_time_ns();
    Length::new(ns / NS_PER_MS)
}
/// Returns the duration since an unspecified epoch measured in ns.
pub fn precise_time_ns() -> NsDuration {
    // Wrap the raw clock reading in the typed `Length` unit.
    let ns = time::precise_time_ns();
    Length::new(ns)
}
/// Data needed to construct a script thread.
///
/// NB: *DO NOT* add any Senders or Receivers here! pcwalton will have to rewrite your code if you
/// do! Use IPC senders and receivers instead.
pub struct InitialScriptState {
    /// The ID of the pipeline with which this script thread is associated.
    pub id: PipelineId,
    /// The subpage ID of this pipeline to create in its pipeline parent.
    /// If `None`, this is the root.
    pub parent_info: Option<(PipelineId, FrameType)>,
    /// The ID of the frame this script is part of.
    pub frame_id: FrameId,
    /// The ID of the top-level frame this script is part of.
    pub top_level_frame_id: FrameId,
    /// A channel with which messages can be sent to us (the script thread).
    pub control_chan: IpcSender<ConstellationControlMsg>,
    /// A port on which messages sent by the constellation to script can be received.
    pub control_port: IpcReceiver<ConstellationControlMsg>,
    /// A channel on which messages can be sent to the constellation from script.
    pub constellation_chan: IpcSender<ScriptMsg>,
    /// A sender for the layout thread to communicate to the constellation.
    pub layout_to_constellation_chan: IpcSender<LayoutMsg>,
    /// A channel to schedule timer events.
    pub scheduler_chan: IpcSender<TimerSchedulerMsg>,
    /// A channel to the resource manager thread.
    pub resource_threads: ResourceThreads,
    /// A channel to the bluetooth thread.
    pub bluetooth_thread: IpcSender<BluetoothRequest>,
    /// The image cache for this script thread.
    pub image_cache: Arc<ImageCache>,
    /// A channel to the time profiler thread.
    pub time_profiler_chan: profile_traits::time::ProfilerChan,
    /// A channel to the memory profiler thread.
    pub mem_profiler_chan: mem::ProfilerChan,
    /// A channel to the developer tools, if applicable.
    pub devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
    /// Information about the initial window size.
    /// `None` when the window dimensions are not yet known.
    pub window_size: Option<WindowSizeData>,
    /// The ID of the pipeline namespace for this script thread.
    pub pipeline_namespace_id: PipelineNamespaceId,
    /// A ping will be sent on this channel once the script thread shuts down.
    pub content_process_shutdown_chan: IpcSender<()>,
    /// A channel to the webvr thread, if available.
    pub webvr_thread: Option<IpcSender<WebVRMsg>>
}
/// This trait allows creating a `ScriptThread` without depending on the `script`
/// crate.
pub trait ScriptThreadFactory {
    /// Type of message sent from script to layout.
    type Message;
    /// Create a `ScriptThread`.
    /// Returns the sender/receiver pair used for script-to-layout messaging.
    fn create(state: InitialScriptState, load_data: LoadData)
              -> (Sender<Self::Message>, Receiver<Self::Message>);
}
/// Whether the sandbox attribute is present for an iframe element
#[derive(PartialEq, Eq, Copy, Clone, Debug, Deserialize, Serialize)]
pub enum IFrameSandboxState {
    /// Sandbox attribute is present
    IFrameSandboxed,
    /// Sandbox attribute is not present
    IFrameUnsandboxed,
}
/// Specifies the information required to load an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfo {
    /// Pipeline ID of the parent of this iframe
    pub parent_pipeline_id: PipelineId,
    /// The ID for this iframe.
    pub frame_id: FrameId,
    /// The new pipeline ID that the iframe has generated.
    pub new_pipeline_id: PipelineId,
    /// Whether this iframe should be considered private
    pub is_private: bool,
    /// Whether this iframe is a mozbrowser iframe
    pub frame_type: FrameType,
    /// Whether this load should replace the current entry (reload). If true, the current
    /// entry will be replaced instead of a new entry being added.
    pub replace: bool,
}
/// Specifies the information required to load a URL in an iframe.
#[derive(Deserialize, Serialize)]
pub struct IFrameLoadInfoWithData {
    /// The information required to load an iframe.
    pub info: IFrameLoadInfo,
    /// Load data containing the url to load
    pub load_data: Option<LoadData>,
    /// The old pipeline ID for this iframe, if a page was previously loaded.
    pub old_pipeline_id: Option<PipelineId>,
    /// Sandbox type of this iframe
    pub sandbox: IFrameSandboxState,
}
// https://developer.mozilla.org/en-US/docs/Web/API/Using_the_Browser_API#Events
/// The events fired in a Browser API context (`<iframe mozbrowser>`)
#[derive(Deserialize, Serialize)]
pub enum MozBrowserEvent {
    /// Sent when the scroll position within a browser `<iframe>` changes.
    AsyncScroll,
    /// Sent when window.close() is called within a browser `<iframe>`.
    Close,
    /// Sent when a browser `<iframe>` tries to open a context menu. This allows
    /// handling `<menuitem>` element available within the browser `<iframe>`'s content.
    ContextMenu,
    /// Sent when an error occurred while trying to load content within a browser `<iframe>`.
    /// Includes a human-readable description, and a machine-readable report.
    Error(MozBrowserErrorType, String, String),
    /// Sent when the favicon of a browser `<iframe>` changes.
    IconChange(String, String, String),
    /// Sent when the browser `<iframe>` has reached the server.
    Connected,
    /// Sent when the browser `<iframe>` has finished loading all its assets.
    LoadEnd,
    /// Sent when the browser `<iframe>` starts to load a new page.
    LoadStart,
    /// Sent when a browser `<iframe>`'s location changes.
    LocationChange(String, bool, bool),
    /// Sent when a new tab is opened within a browser `<iframe>` as a result of the user
    /// issuing a command to open a link target in a new tab (for example ctrl/cmd + click.)
    /// Includes the URL.
    OpenTab(String),
    /// Sent when a new window is opened within a browser `<iframe>`.
    /// Includes the URL, target browsing context name, and features.
    OpenWindow(String, Option<String>, Option<String>),
    /// Sent when the SSL state changes within a browser `<iframe>`.
    SecurityChange(HttpsState),
    /// Sent when alert(), confirm(), or prompt() is called within a browser `<iframe>`.
    ShowModalPrompt(String, String, String, String), // TODO(simartin): Handle unblock()
    /// Sent when the document.title changes within a browser `<iframe>`.
    TitleChange(String),
    /// Sent when an HTTP authentication is requested.
    UsernameAndPasswordRequired,
    /// Sent when a link to a search engine is found.
    OpenSearch,
    /// Sent when visibility state changes.
    VisibilityChange(bool),
}
impl MozBrowserEvent {
/// Get the name of the event as a `& str`
pub fn name(&self) -> &'static str {
match *self {
MozBrowserEvent::AsyncScroll => "mozbrowserasyncscroll",
MozBrowserEvent::Close => "mozbrowserclose",
MozBrowserEvent::Connected => "mozbrowserconnected",
MozBrowserEvent::ContextMenu => "mozbrowsercontextmenu",
MozBrowserEvent::Error(_, _, _) => "mozbrowsererror",
MozBrowserEvent::IconChange(_, _, _) => "mozbrowsericonchange",
MozBrowserEvent::LoadEnd => "mozbrowserloadend",
MozBrowserEvent::LoadStart => "mozbrowserloadstart",
MozBrowserEvent::LocationChange(_, _, _) => "mozbrowserlocationchange",
MozBrowserEvent::OpenTab(_) => "mozbrowseropentab",
MozBrowserEvent::OpenWindow(_, _, _) => "mozbrowseropenwindow",
MozBrowserEvent::SecurityChange(_) => "mozbrowsersecuritychange",
MozBrowserEvent::ShowModalPrompt(_, _, _, _) => "mozbrowsershowmodalprompt",
MozBrowserEvent::TitleChange(_) => "mozbrowsertitlechange",
MozBrowserEvent::UsernameAndPasswordRequired => "mozbrowserusernameandpasswordrequired",
MozBrowserEvent::OpenSearch => "mozbrowseropensearch",
MozBrowserEvent::VisibilityChange(_) => "mozbrowservisibilitychange",
}
}
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
/// The different types of Browser error events
#[derive(Deserialize, Serialize)]
pub enum MozBrowserErrorType {
    // For the moment, we are just reporting panics, using the "fatal" type.
    /// A fatal error
    Fatal,
}
impl MozBrowserErrorType {
    /// Get the name of the error type as a `&str`.
    pub fn name(&self) -> &'static str {
        match *self {
            MozBrowserErrorType::Fatal => "fatal",
        }
    }
}
/// Specifies whether the script or layout thread needs to be ticked for animation.
#[derive(Deserialize, Serialize)]
pub enum AnimationTickType {
    /// The script thread.
    Script,
    /// The layout thread.
    Layout,
}
/// The scroll state of a stacking context.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub struct StackingContextScrollState {
    /// The ID of the scroll root.
    pub scroll_root_id: ScrollRootId,
    /// The scrolling offset of this stacking context.
    pub scroll_offset: Point2D<f32>,
}
/// One hardware pixel.
///
/// This unit corresponds to the smallest addressable element of the display hardware.
// Uninhabited marker type used only as a unit parameter (e.g. in `ScaleFactor`).
#[derive(Copy, Clone, Debug)]
pub enum DevicePixel {}
/// Data about the window size.
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct WindowSizeData {
    /// The size of the initial layout viewport, before parsing an
    /// http://www.w3.org/TR/css-device-adapt/#initial-viewport
    pub initial_viewport: TypedSize2D<f32, CSSPixel>,
    /// The resolution of the window in dppx, not including any "pinch zoom" factor.
    pub device_pixel_ratio: ScaleFactor<f32, CSSPixel, DevicePixel>,
}
/// The type of window size change.
#[derive(Deserialize, Eq, PartialEq, Serialize, Copy, Clone, HeapSizeOf)]
pub enum WindowSizeType {
    /// Initial load.
    Initial,
    /// Window resize.
    Resize,
}
/// Messages to the constellation originating from the WebDriver server.
#[derive(Deserialize, Serialize)]
pub enum WebDriverCommandMsg {
/// Get the window size.
GetWindowSize(PipelineId, IpcSender<WindowSizeData>),
/// Load a URL in the pipeline with the given ID.
LoadUrl(PipelineId, LoadData, IpcSender<LoadStatus>),
/// Refresh the pipeline with the given ID.
Refresh(PipelineId, IpcSender<LoadStatus>),
/// Pass a webdriver command to the script thread of the pipeline with the
/// given ID for execution.
| MouseEventType | identifier_name |
twitch.rs | use std;
extern crate serde_json;
extern crate serde;
use std::io::Read;
use emote;
use emote::{EmoteError,JsonError};
use http;
const GLOBAL_URL:&'static str = "https://api.twitch.tv/kraken/chat/emoticon_images?emotesets=0";
pub struct Emote<'a>{
http: &'a http::Http,
}
#[derive(Serialize, Deserialize,Default)]
pub struct Set{
id: u64,
code: String,
}
impl emote::Emote for Set{
fn name(&self)->&str{
&self.code
}
}
impl <'a>Emote<'a>{
pub fn new(http:&http::Http)->Result<Emote,String>{
Ok(Emote{
http
})
}
pub fn get_global(&self)->Result<Vec<Set>,EmoteError>{
//let s = try!(self.http.get_body_string(GLOBAL_URL).map_err(EmoteError::Http));
let mut file = match std::fs::File::open("twitch_global.json"){
Ok(a)=>a,
Err(_)=>panic!("could not open twitch global file"),
};
let mut s = String::new();
file.read_to_string(&mut s);
| };
let mut emotes = Vec::new();
for set in sets{
let emote = try_map!(serde_json::from_value(set),EmoteError::Json(JsonError::ParseError));
emotes.push(emote);
}
Ok(emotes)
}
}
#[cfg(test)]
mod test {
#[test]
fn twitch_test(){
let http_client = http::Http::new().unwrap();
let mut twitch = Emote::new(&http_client).unwrap();
}
} | let v:serde_json::Value = try_map!(serde_json::from_str(&s),EmoteError::Json(JsonError::ParseError));
let emoticon_sets = &v["emoticon_sets"];
let mut sets = match emoticon_sets["0"].as_array(){
Some(a)=>a.clone(),
None=>return Err(EmoteError::Json(JsonError::ParseError)), | random_line_split |
twitch.rs | use std;
extern crate serde_json;
extern crate serde;
use std::io::Read;
use emote;
use emote::{EmoteError,JsonError};
use http;
const GLOBAL_URL:&'static str = "https://api.twitch.tv/kraken/chat/emoticon_images?emotesets=0";
pub struct | <'a>{
http: &'a http::Http,
}
#[derive(Serialize, Deserialize,Default)]
pub struct Set{
id: u64,
code: String,
}
impl emote::Emote for Set{
fn name(&self)->&str{
&self.code
}
}
impl <'a>Emote<'a>{
pub fn new(http:&http::Http)->Result<Emote,String>{
Ok(Emote{
http
})
}
pub fn get_global(&self)->Result<Vec<Set>,EmoteError>{
//let s = try!(self.http.get_body_string(GLOBAL_URL).map_err(EmoteError::Http));
let mut file = match std::fs::File::open("twitch_global.json"){
Ok(a)=>a,
Err(_)=>panic!("could not open twitch global file"),
};
let mut s = String::new();
file.read_to_string(&mut s);
let v:serde_json::Value = try_map!(serde_json::from_str(&s),EmoteError::Json(JsonError::ParseError));
let emoticon_sets = &v["emoticon_sets"];
let mut sets = match emoticon_sets["0"].as_array(){
Some(a)=>a.clone(),
None=>return Err(EmoteError::Json(JsonError::ParseError)),
};
let mut emotes = Vec::new();
for set in sets{
let emote = try_map!(serde_json::from_value(set),EmoteError::Json(JsonError::ParseError));
emotes.push(emote);
}
Ok(emotes)
}
}
#[cfg(test)]
mod test {
#[test]
fn twitch_test(){
let http_client = http::Http::new().unwrap();
let mut twitch = Emote::new(&http_client).unwrap();
}
}
| Emote | identifier_name |
nodept.rs | pub type Node = usize;
#[derive(Debug, Copy, Clone)]
pub struct NodePt {
pub id: Node,
pub x: f64,
pub y: f64,
}
impl NodePt {
pub fn | (node_id: Node, x: f64, y: f64) -> NodePt {
NodePt {
id: node_id,
x: x,
y: y,
}
}
pub fn distance_to(self, other: NodePt) -> f64 {
let xx = (self.x - other.x) * (self.x - other.x);
let yy = (self.y - other.y) * (self.y - other.y);
(xx + yy).sqrt().round()
}
}
impl PartialEq for NodePt {
fn eq(&self, other: &NodePt) -> bool {
self.id == other.id
}
fn ne(&self, other: &NodePt) -> bool {
self.id!= other.id
}
}
| new | identifier_name |
nodept.rs | pub type Node = usize;
#[derive(Debug, Copy, Clone)]
pub struct NodePt {
pub id: Node,
pub x: f64,
pub y: f64,
}
impl NodePt {
pub fn new(node_id: Node, x: f64, y: f64) -> NodePt {
NodePt { | }
}
pub fn distance_to(self, other: NodePt) -> f64 {
let xx = (self.x - other.x) * (self.x - other.x);
let yy = (self.y - other.y) * (self.y - other.y);
(xx + yy).sqrt().round()
}
}
impl PartialEq for NodePt {
fn eq(&self, other: &NodePt) -> bool {
self.id == other.id
}
fn ne(&self, other: &NodePt) -> bool {
self.id!= other.id
}
} | id: node_id,
x: x,
y: y, | random_line_split |
mod.rs | // "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/vfs/mod.rs
//! Virtual File System
#[allow(unused_imports)]
use prelude::*;
use metadevs::storage::VolumeHandle;
module_define!(VFS, [], init);
pub type Result<T> = ::core::result::Result<T,Error>;
#[derive(Debug)]
pub enum Error
{
/// File not found
NotFound,
/// Permission denied
PermissionDenied,
/// File exclusively locked
Locked,
/// Node was not the requested type
TypeMismatch,
/// A component of the path was not a directory
NonDirComponent,
/// Symbolic link recursion limit reached
RecursionDepthExceeded,
/// Block-level IO Error
BlockIoError(::metadevs::storage::IoError),
/// Path was malformed (too long, not absolute, not normalised,... depends)
MalformedPath,
/// Unknown (misc) error
Unknown(&'static str),
}
impl From<::metadevs::storage::IoError> for Error {
fn | (v: ::metadevs::storage::IoError) -> Error {
Error::BlockIoError(v)
}
}
pub use self::path::{Path,PathBuf};
pub mod node;
pub mod mount;
pub mod handle;
mod path;
mod ramfs;
fn init()
{
// 1. Initialise global structures
mount::init();
node::init();
ramfs::init();
// 2. Start the root/builtin filesystems
mount::mount("/".as_ref(), VolumeHandle::new_ramdisk(0), "ramfs", &[]).unwrap();//"Unable to mount /");
// 3. Initialise root filesystem layout
let root = match handle::Dir::open( Path::new("/") )
{
Ok(v) => v,
Err(e) => panic!("BUG - Opening '/' failed: {:?}", e),
};
root.mkdir("system").unwrap();
root.mkdir("volumes").unwrap();
root.mkdir("temp").unwrap();
}
| from | identifier_name |
mod.rs | // "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/vfs/mod.rs
//! Virtual File System
#[allow(unused_imports)]
use prelude::*;
use metadevs::storage::VolumeHandle;
module_define!(VFS, [], init);
pub type Result<T> = ::core::result::Result<T,Error>;
#[derive(Debug)]
pub enum Error
{
/// File not found
NotFound,
/// Permission denied
PermissionDenied,
/// File exclusively locked
Locked,
/// Node was not the requested type
TypeMismatch,
/// A component of the path was not a directory
NonDirComponent,
/// Symbolic link recursion limit reached
RecursionDepthExceeded,
/// Block-level IO Error
BlockIoError(::metadevs::storage::IoError),
/// Path was malformed (too long, not absolute, not normalised,... depends)
MalformedPath,
/// Unknown (misc) error
Unknown(&'static str),
}
impl From<::metadevs::storage::IoError> for Error {
fn from(v: ::metadevs::storage::IoError) -> Error {
Error::BlockIoError(v)
}
}
pub use self::path::{Path,PathBuf};
pub mod node;
pub mod mount;
pub mod handle;
mod path;
mod ramfs;
fn init()
{
// 1. Initialise global structures
mount::init();
node::init();
ramfs::init();
// 2. Start the root/builtin filesystems
mount::mount("/".as_ref(), VolumeHandle::new_ramdisk(0), "ramfs", &[]).unwrap();//"Unable to mount /");
// 3. Initialise root filesystem layout
let root = match handle::Dir::open( Path::new("/") ) | Err(e) => panic!("BUG - Opening '/' failed: {:?}", e),
};
root.mkdir("system").unwrap();
root.mkdir("volumes").unwrap();
root.mkdir("temp").unwrap();
} | {
Ok(v) => v, | random_line_split |
nested-class.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn | () {
struct b {
i: int,
}
impl b {
fn do_stuff(&self) -> int { return 37; }
}
fn b(i:int) -> b {
b {
i: i
}
}
// fn b(x:int) -> int { panic!(); }
let z = b(42);
assert_eq!(z.i, 42);
assert_eq!(z.do_stuff(), 37);
}
| main | identifier_name |
nested-class.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
struct b {
i: int,
}
impl b {
fn do_stuff(&self) -> int { return 37; }
}
fn b(i:int) -> b |
// fn b(x:int) -> int { panic!(); }
let z = b(42);
assert_eq!(z.i, 42);
assert_eq!(z.do_stuff(), 37);
}
| {
b {
i: i
}
} | identifier_body |
nested-class.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() { | struct b {
i: int,
}
impl b {
fn do_stuff(&self) -> int { return 37; }
}
fn b(i:int) -> b {
b {
i: i
}
}
// fn b(x:int) -> int { panic!(); }
let z = b(42);
assert_eq!(z.i, 42);
assert_eq!(z.do_stuff(), 37);
} | random_line_split |
|
mod.rs | use {Function, Functions, Context, Contexts, Compiled, Value};
use tree::Tree;
use error::Error;
use serde::Serialize;
use to_value;
use std::fmt;
/// Expression builder
pub struct Expr {
expression: String,
compiled: Option<Compiled>,
functions: Functions,
contexts: Contexts,
}
impl Expr {
/// Create an expression.
pub fn new<T: Into<String>>(expr: T) -> Expr {
Expr {
expression: expr.into(),
compiled: None,
functions: Functions::new(),
contexts: create_empty_contexts(),
}
}
/// Set function.
pub fn function<T, F>(mut self, name: T, function: F) -> Expr
where T: Into<String>,
F:'static + Fn(Vec<Value>) -> Result<Value, Error> + Sync + Send
{
self.functions.insert(name.into(), Function::new(function));
self
}
/// Set value.
pub fn value<T, V>(mut self, name: T, value: V) -> Expr
where T: Into<String>,
V: Serialize
{
self.contexts.last_mut().unwrap().insert(name.into(), to_value(value));
self
}
/// Compile an expression.
/// An expression can be compiled only once and then invoked multiple times with different context and function.
/// You can also execute a expression without compile.
pub fn compile(mut self) -> Result<Expr, Error> {
self.compiled = Some(Tree::new(self.expression.clone()).compile()?);
Ok(self)
}
/// Execute the expression.
pub fn exec(&self) -> Result<Value, Error> {
if self.compiled.is_none() {
Tree::new(self.expression.clone()).compile()?(&self.contexts, &self.functions)
} else {
self.compiled.as_ref().unwrap()(&self.contexts, &self.functions)
}
}
fn get_compiled(&self) -> Option<&Compiled> {
self.compiled.as_ref()
}
}
impl Clone for Expr {
/// Returns a copy of the value. Notice that functions can not be cloned. The cloned expr's functions will be empty.
fn clone(&self) -> Expr {
Expr {
expression: self.expression.clone(),
| compiled: if self.compiled.is_some() {
Some(Tree::new(self.expression.clone()).compile().unwrap())
} else {
None
},
contexts: self.contexts.clone(),
functions: Functions::new(),
}
}
}
impl fmt::Debug for Expr {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(formatter, "{:?}", self.expression)
}
}
/// Execute options
pub struct ExecOptions<'a> {
expr: &'a Expr,
contexts: Option<&'a [Context]>,
functions: Option<&'a Functions>,
}
impl<'a> ExecOptions<'a> {
/// Create an option.
pub fn new(expr: &'a Expr) -> ExecOptions<'a> {
ExecOptions {
expr: expr,
contexts: None,
functions: None,
}
}
/// Set contexts.
pub fn contexts(&mut self, contexts: &'a [Context]) -> &'a mut ExecOptions {
self.contexts = Some(contexts);
self
}
/// Set functions.
pub fn functions(&mut self, functions: &'a Functions) -> &'a mut ExecOptions {
self.functions = Some(functions);
self
}
/// Execute the compiled expression.
pub fn exec(&self) -> Result<Value, Error> {
let empty_contexts = create_empty_contexts();
let empty_functions = Functions::new();
let contexts = if self.contexts.is_some() {
self.contexts.unwrap()
} else {
&empty_contexts
};
let functions = if self.functions.is_some() {
self.functions.unwrap()
} else {
&empty_functions
};
let compiled = self.expr.get_compiled();
if compiled.is_none() {
Tree::new(self.expr.expression.clone()).compile()?(contexts, functions)
} else {
compiled.unwrap()(contexts, functions)
}
}
}
fn create_empty_contexts() -> Contexts {
let mut contexts = Contexts::new();
contexts.push(Context::new());
contexts
} | random_line_split |
|
mod.rs |
use {Function, Functions, Context, Contexts, Compiled, Value};
use tree::Tree;
use error::Error;
use serde::Serialize;
use to_value;
use std::fmt;
/// Expression builder
pub struct Expr {
expression: String,
compiled: Option<Compiled>,
functions: Functions,
contexts: Contexts,
}
impl Expr {
    /// Create an expression builder from an expression string.
    ///
    /// The expression is not parsed here; it is compiled lazily by `exec`
    /// unless `compile` is called first.
    pub fn new<T: Into<String>>(expr: T) -> Expr {
        Expr {
            expression: expr.into(),
            compiled: None,
            functions: Functions::new(),
            contexts: create_empty_contexts(),
        }
    }
    /// Set function. Builder-style: consumes and returns `self`.
    pub fn function<T, F>(mut self, name: T, function: F) -> Expr
        where T: Into<String>,
              F: 'static + Fn(Vec<Value>) -> Result<Value, Error> + Sync + Send
    {
        self.functions.insert(name.into(), Function::new(function));
        self
    }
    /// Set value. Builder-style: stores the value in the innermost context.
    pub fn value<T, V>(mut self, name: T, value: V) -> Expr
        where T: Into<String>,
              V: Serialize
    {
        // `contexts` always holds at least one context (see `create_empty_contexts`),
        // so `last_mut()` cannot return `None` here.
        self.contexts.last_mut().unwrap().insert(name.into(), to_value(value));
        self
    }
    /// Compile an expression.
    /// An expression can be compiled only once and then invoked multiple times with different context and function.
    /// You can also execute a expression without compile.
    pub fn compile(mut self) -> Result<Expr, Error> {
        self.compiled = Some(Tree::new(self.expression.clone()).compile()?);
        Ok(self)
    }
    /// Execute the expression.
    /// Compiles on the fly when `compile` was not called beforehand.
    pub fn exec(&self) -> Result<Value, Error> {
        if self.compiled.is_none() {
            Tree::new(self.expression.clone()).compile()?(&self.contexts, &self.functions)
        } else {
            self.compiled.as_ref().unwrap()(&self.contexts, &self.functions)
        }
    }
    // Internal accessor used by `ExecOptions` to reuse an existing compilation.
    fn get_compiled(&self) -> Option<&Compiled> {
        self.compiled.as_ref()
    }
}
impl Clone for Expr {
/// Returns a copy of the value. Notice that functions can not be cloned. The cloned expr's functions will be empty.
fn clone(&self) -> Expr {
Expr {
expression: self.expression.clone(),
compiled: if self.compiled.is_some() {
Some(Tree::new(self.expression.clone()).compile().unwrap())
} else {
None
},
contexts: self.contexts.clone(),
functions: Functions::new(),
}
}
}
impl fmt::Debug for Expr {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(formatter, "{:?}", self.expression)
}
}
/// Execute options
pub struct ExecOptions<'a> {
expr: &'a Expr,
contexts: Option<&'a [Context]>,
functions: Option<&'a Functions>,
}
impl<'a> ExecOptions<'a> {
/// Create an option.
pub fn new(expr: &'a Expr) -> ExecOptions<'a> {
ExecOptions {
expr: expr,
contexts: None,
functions: None,
}
}
/// Set contexts.
pub fn | (&mut self, contexts: &'a [Context]) -> &'a mut ExecOptions {
self.contexts = Some(contexts);
self
}
/// Set functions.
pub fn functions(&mut self, functions: &'a Functions) -> &'a mut ExecOptions {
self.functions = Some(functions);
self
}
/// Execute the compiled expression.
pub fn exec(&self) -> Result<Value, Error> {
let empty_contexts = create_empty_contexts();
let empty_functions = Functions::new();
let contexts = if self.contexts.is_some() {
self.contexts.unwrap()
} else {
&empty_contexts
};
let functions = if self.functions.is_some() {
self.functions.unwrap()
} else {
&empty_functions
};
let compiled = self.expr.get_compiled();
if compiled.is_none() {
Tree::new(self.expr.expression.clone()).compile()?(contexts, functions)
} else {
compiled.unwrap()(contexts, functions)
}
}
}
// Builds the default context stack: a single empty context, so lookups and
// `Expr::value` always have at least one context to work with.
fn create_empty_contexts() -> Contexts {
    let mut contexts = Contexts::new();
    contexts.push(Context::new());
    contexts
}
| contexts | identifier_name |
mod.rs |
use {Function, Functions, Context, Contexts, Compiled, Value};
use tree::Tree;
use error::Error;
use serde::Serialize;
use to_value;
use std::fmt;
/// Expression builder
pub struct Expr {
expression: String,
compiled: Option<Compiled>,
functions: Functions,
contexts: Contexts,
}
impl Expr {
/// Create an expression.
pub fn new<T: Into<String>>(expr: T) -> Expr {
Expr {
expression: expr.into(),
compiled: None,
functions: Functions::new(),
contexts: create_empty_contexts(),
}
}
/// Set function.
pub fn function<T, F>(mut self, name: T, function: F) -> Expr
where T: Into<String>,
F:'static + Fn(Vec<Value>) -> Result<Value, Error> + Sync + Send
{
self.functions.insert(name.into(), Function::new(function));
self
}
/// Set value.
pub fn value<T, V>(mut self, name: T, value: V) -> Expr
where T: Into<String>,
V: Serialize
{
self.contexts.last_mut().unwrap().insert(name.into(), to_value(value));
self
}
/// Compile an expression.
/// An expression can be compiled only once and then invoked multiple times with different context and function.
/// You can also execute a expression without compile.
pub fn compile(mut self) -> Result<Expr, Error> {
self.compiled = Some(Tree::new(self.expression.clone()).compile()?);
Ok(self)
}
/// Execute the expression.
pub fn exec(&self) -> Result<Value, Error> {
if self.compiled.is_none() {
Tree::new(self.expression.clone()).compile()?(&self.contexts, &self.functions)
} else {
self.compiled.as_ref().unwrap()(&self.contexts, &self.functions)
}
}
fn get_compiled(&self) -> Option<&Compiled> {
self.compiled.as_ref()
}
}
impl Clone for Expr {
/// Returns a copy of the value. Notice that functions can not be cloned. The cloned expr's functions will be empty.
fn clone(&self) -> Expr {
Expr {
expression: self.expression.clone(),
compiled: if self.compiled.is_some() | else {
None
},
contexts: self.contexts.clone(),
functions: Functions::new(),
}
}
}
impl fmt::Debug for Expr {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(formatter, "{:?}", self.expression)
}
}
/// Execute options
pub struct ExecOptions<'a> {
expr: &'a Expr,
contexts: Option<&'a [Context]>,
functions: Option<&'a Functions>,
}
impl<'a> ExecOptions<'a> {
/// Create an option.
pub fn new(expr: &'a Expr) -> ExecOptions<'a> {
ExecOptions {
expr: expr,
contexts: None,
functions: None,
}
}
/// Set contexts.
pub fn contexts(&mut self, contexts: &'a [Context]) -> &'a mut ExecOptions {
self.contexts = Some(contexts);
self
}
/// Set functions.
pub fn functions(&mut self, functions: &'a Functions) -> &'a mut ExecOptions {
self.functions = Some(functions);
self
}
/// Execute the compiled expression.
pub fn exec(&self) -> Result<Value, Error> {
let empty_contexts = create_empty_contexts();
let empty_functions = Functions::new();
let contexts = if self.contexts.is_some() {
self.contexts.unwrap()
} else {
&empty_contexts
};
let functions = if self.functions.is_some() {
self.functions.unwrap()
} else {
&empty_functions
};
let compiled = self.expr.get_compiled();
if compiled.is_none() {
Tree::new(self.expr.expression.clone()).compile()?(contexts, functions)
} else {
compiled.unwrap()(contexts, functions)
}
}
}
fn create_empty_contexts() -> Contexts {
let mut contexts = Contexts::new();
contexts.push(Context::new());
contexts
}
| {
Some(Tree::new(self.expression.clone()).compile().unwrap())
} | conditional_block |
helpers.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethkey::KeyPair;
use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use util::*;
use spec::*;
use account_provider::AccountProvider;
use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use builtin::Builtin;
use state::*;
use evm::{Schedule, Factory as EvmFactory};
use factory::Factories;
use engines::Engine;
use ethereum;
use ethereum::ethash::EthashParams;
use miner::Miner;
use header::Header;
use transaction::{Action, Transaction, SignedTransaction};
use rlp::{self, RlpStream};
use views::BlockView;
/// Chain eras selectable by the JSON consensus tests.
#[cfg(feature = "json-tests")]
pub enum ChainEra {
    Frontier,
    Homestead,
    Eip150,
    _Eip161,
    TransitionTest,
}
/// Engine wrapper for tests that overrides the maximum call depth.
pub struct TestEngine {
    // Real engine all other queries are delegated to.
    engine: Arc<Engine>,
    // Value substituted into `Schedule::max_depth` by `schedule()`.
    max_depth: usize,
}
impl TestEngine {
    /// Frontier test engine with the given maximum call depth.
    pub fn new(max_depth: usize) -> TestEngine {
        TestEngine {
            engine: ethereum::new_frontier_test().engine,
            max_depth: max_depth,
        }
    }
    /// Metropolis test engine. Note: max depth is fixed to 0 here.
    pub fn new_metropolis() -> TestEngine {
        TestEngine {
            engine: ethereum::new_metropolis_test().engine,
            max_depth: 0,
        }
    }
}
impl Engine for TestEngine {
    fn name(&self) -> &str {
        "TestEngine"
    }
    fn params(&self) -> &CommonParams {
        self.engine.params()
    }
    fn builtins(&self) -> &BTreeMap<Address, Builtin> {
        self.engine.builtins()
    }
    // Delegates to the wrapped engine (always for block 0), then patches in
    // the test-configured max call depth.
    fn schedule(&self, _block_number: u64) -> Schedule {
        let mut schedule = self.engine.schedule(0);
        schedule.max_depth = self.max_depth;
        schedule
    }
}
// TODO: move everything over to get_null_spec.
/// Returns the chain spec used by most unit tests (`Spec::new_test()`).
pub fn get_test_spec() -> Spec {
    Spec::new_test()
}
/// RLP-encodes a block consisting of `header` plus empty transaction and
/// uncle lists.
pub fn create_test_block(header: &Header) -> Bytes {
    let mut stream = RlpStream::new_list(3);
    stream.append(header);
    // Empty transaction list followed by empty uncle list.
    stream.append_raw(&rlp::EMPTY_LIST_RLP, 1);
    stream.append_raw(&rlp::EMPTY_LIST_RLP, 1);
    stream.out()
}
/// Builds a deliberately unverifiable header whose fields are simple
/// functions of `order`, chained to `parent_hash`.
fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header {
    let mut h = Header::new();
    // All setters are independent field assignments, grouped by theme.
    h.set_parent_hash(parent_hash);
    h.set_number(order as u64);
    h.set_timestamp((order * 10) as u64);
    h.set_difficulty((order * 100).into());
    h.set_gas_limit(0.into());
    h.set_state_root(H256::zero());
    h
}
/// Like `create_unverifiable_block`, but with the header's extra-data field
/// set to `extra`, or to three bytes derived from `order` when `extra` is `None`.
fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option<Bytes>) -> Bytes {
    let mut header = create_unverifiable_block_header(order, parent_hash);
    let extra_data = extra.unwrap_or_else(|| {
        // Derive a small deterministic payload from the low byte of `order`.
        let base = (order & 0x000000ff) as u8;
        vec![base + 1, base + 2, base + 3]
    });
    header.set_extra_data(extra_data);
    create_test_block(&header)
}
// Convenience wrapper: unverifiable block with default (generated) extra data.
fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes {
    create_test_block(&create_unverifiable_block_header(order, parent_hash))
}
pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransaction], uncles: &[Header]) -> Bytes {
let mut rlp = RlpStream::new_list(3);
rlp.append(header); | rlp.append_list(&uncles);
rlp.out()
}
/// Creates a test-spec client with `block_number` empty blocks imported.
pub fn generate_dummy_client(block_number: u32) -> Arc<Client> {
    generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[])
}
/// Creates a null-spec client with blocks that contain transactions.
pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> {
    generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices)
}
/// Creates a client for an arbitrary spec, without an account provider.
pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
    generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices)
}
/// Creates a client for an arbitrary spec with the given account provider and no blocks.
pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> Arc<Client> where F: Fn()->Spec {
    generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[])
}
/// Creates a client for the spec produced by `get_test_spec`, then forges and
/// imports `block_number` sealed blocks, each carrying `txs_per_block`
/// `Action::Create` transactions whose gas prices cycle through `tx_gas_prices`.
///
/// NOTE(review): if `txs_per_block > 0` this indexes `tx_gas_prices`, so the
/// slice must be non-empty in that case — callers appear to guarantee this.
pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
    let test_spec = get_test_spec();
    let client_db = new_db();
    let client = Client::new(
        ClientConfig::default(),
        &test_spec,
        client_db,
        Arc::new(Miner::with_spec_and_accounts(&test_spec, accounts)),
        IoChannel::disconnected(),
    ).unwrap();
    let test_engine = &*test_spec.engine;
    // State database seeded with the spec's genesis accounts.
    let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
    let genesis_header = test_spec.genesis_header();
    let mut rolling_timestamp = 40;
    let mut last_hashes = vec![];
    let mut last_header = genesis_header.clone();
    // Deterministic test keypair: secret is the sha3 of the empty string.
    let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap();
    let author = kp.address();
    // Transaction nonce counter; shared across all forged blocks.
    let mut n = 0;
    for _ in 0..block_number {
        last_hashes.push(last_header.hash());
        // forge block.
        let mut b = OpenBlock::new(
            test_engine,
            Default::default(),
            false,
            db,
            &last_header,
            Arc::new(last_hashes.clone()),
            author.clone(),
            (3141562.into(), 31415620.into()),
            vec![],
            false,
        ).unwrap();
        b.set_difficulty(U256::from(0x20000));
        rolling_timestamp += 10;
        b.set_timestamp(rolling_timestamp);
        // first block we don't have any balance, so can't send any transactions.
        for _ in 0..txs_per_block {
            b.push_transaction(Transaction {
                nonce: n.into(),
                gas_price: tx_gas_prices[n % tx_gas_prices.len()],
                gas: 100000.into(),
                action: Action::Create,
                data: vec![],
                value: U256::zero(),
            }.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
            n += 1;
        }
        let b = b.close_and_lock().seal(test_engine, vec![]).unwrap();
        if let Err(e) = client.import_block(b.rlp_bytes()) {
            panic!("error importing block which is valid by definition: {:?}", e);
        }
        // Chain the next iteration off the block just produced, and recycle
        // its post-state database.
        last_header = BlockView::new(&b.rlp_bytes()).header();
        db = b.drain();
    }
    // Drain the verification queue so the imported blocks are visible to callers.
    client.flush_queue();
    client.import_verified_blocks();
    client
}
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
//let test_engine = test_spec.to_engine().unwrap();
let state_root = test_spec.genesis_header().state_root().clone();
let mut rolling_hash = client.chain_info().best_block_hash;
let mut rolling_block_number = starting_number as u64;
let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
for _ in 0..block_number {
let mut header = Header::new();
header.set_gas_limit(test_engine.params().min_gas_limit);
header.set_difficulty(U256::from(0x20000));
header.set_timestamp(rolling_timestamp);
header.set_number(rolling_block_number);
header.set_parent_hash(rolling_hash);
header.set_state_root(state_root);
rolling_hash = header.hash();
rolling_block_number = rolling_block_number + 1;
rolling_timestamp = rolling_timestamp + 10;
if let Err(e) = client.import_block(create_test_block(&header)) {
panic!("error importing block which is valid by definition: {:?}", e);
}
}
}
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec(&test_spec)),
IoChannel::disconnected(),
).unwrap();
for block in &blocks {
if client.import_block(block.clone()).is_err() {
panic!("panic importing block which is well-formed");
}
}
client.flush_queue();
client.import_verified_blocks();
client
}
fn new_db() -> Arc<KeyValueDB> {
Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
}
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_empty_blockchain() -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
bc
}
pub fn get_temp_state() -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
State::new(journal_db, U256::from(0), Default::default())
}
pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
let mut factories = Factories::default();
factories.vm = factory;
State::new(journal_db, U256::from(0), factories)
}
pub fn get_temp_state_db() -> StateDB {
let db = new_db();
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec();
get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash())
}
pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
let mut rolling_timestamp = start_number as u64 * 10;
let mut parent = *parent_hash;
let mut r = Vec::new();
for i in start_number.. start_number + count + 1 {
let mut block_header = Header::new();
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(i).mul(U256([0, 1, 0, 0])));
block_header.set_timestamp(rolling_timestamp);
block_header.set_number(i as u64);
block_header.set_parent_hash(parent);
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
parent = block_header.hash();
rolling_timestamp = rolling_timestamp + 10;
r.push(create_test_block(&block_header));
}
r
}
pub fn get_good_dummy_block_hash() -> (H256, Bytes) {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
(block_header.hash(), create_test_block(&block_header))
}
pub fn get_good_dummy_block() -> Bytes {
let (_, bytes) = get_good_dummy_block_hash();
bytes
}
pub fn get_bad_state_dummy_block() -> Bytes {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(0xbad.into());
create_test_block(&block_header)
}
pub fn get_default_ethash_params() -> EthashParams{
EthashParams {
gas_limit_bound_divisor: U256::from(1024),
minimum_difficulty: U256::from(131072),
difficulty_bound_divisor: U256::from(2048),
difficulty_increment_divisor: 10,
metropolis_difficulty_increment_divisor: 9,
duration_limit: 13,
block_reward: U256::from(0),
registrar: "0000000000000000000000000000000000000001".into(),
homestead_transition: 1150000,
dao_hardfork_transition: u64::max_value(),
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(),
dao_hardfork_accounts: vec![],
difficulty_hardfork_transition: u64::max_value(),
difficulty_hardfork_bound_divisor: U256::from(0),
bomb_defuse_transition: u64::max_value(),
eip100b_transition: u64::max_value(),
eip150_transition: u64::max_value(),
eip155_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
ecip1010_pause_transition: u64::max_value(),
ecip1010_continue_transition: u64::max_value(),
ecip1017_era_rounds: u64::max_value(),
max_code_size: u64::max_value(),
max_gas_limit_transition: u64::max_value(),
max_gas_limit: U256::max_value(),
min_gas_price_transition: u64::max_value(),
min_gas_price: U256::zero(),
}
} | rlp.begin_list(transactions.len());
for t in transactions {
rlp.append_raw(&rlp::encode(t).into_vec(), 1);
} | random_line_split |
helpers.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethkey::KeyPair;
use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use util::*;
use spec::*;
use account_provider::AccountProvider;
use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use builtin::Builtin;
use state::*;
use evm::{Schedule, Factory as EvmFactory};
use factory::Factories;
use engines::Engine;
use ethereum;
use ethereum::ethash::EthashParams;
use miner::Miner;
use header::Header;
use transaction::{Action, Transaction, SignedTransaction};
use rlp::{self, RlpStream};
use views::BlockView;
#[cfg(feature = "json-tests")]
pub enum ChainEra {
Frontier,
Homestead,
Eip150,
_Eip161,
TransitionTest,
}
pub struct TestEngine {
engine: Arc<Engine>,
max_depth: usize,
}
impl TestEngine {
pub fn new(max_depth: usize) -> TestEngine {
TestEngine {
engine: ethereum::new_frontier_test().engine,
max_depth: max_depth,
}
}
pub fn new_metropolis() -> TestEngine {
TestEngine {
engine: ethereum::new_metropolis_test().engine,
max_depth: 0,
}
}
}
impl Engine for TestEngine {
fn name(&self) -> &str {
"TestEngine"
}
fn params(&self) -> &CommonParams {
self.engine.params()
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
self.engine.builtins()
}
fn schedule(&self, _block_number: u64) -> Schedule {
let mut schedule = self.engine.schedule(0);
schedule.max_depth = self.max_depth;
schedule
}
}
// TODO: move everything over to get_null_spec.
pub fn get_test_spec() -> Spec {
Spec::new_test()
}
pub fn create_test_block(header: &Header) -> Bytes {
let mut rlp = RlpStream::new_list(3);
rlp.append(header);
rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
rlp.out()
}
fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header {
let mut header = Header::new();
header.set_gas_limit(0.into());
header.set_difficulty((order * 100).into());
header.set_timestamp((order * 10) as u64);
header.set_number(order as u64);
header.set_parent_hash(parent_hash);
header.set_state_root(H256::zero());
header
}
fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option<Bytes>) -> Bytes {
let mut header = create_unverifiable_block_header(order, parent_hash);
header.set_extra_data(match extra {
Some(extra_data) => extra_data,
None => {
let base = (order & 0x000000ff) as u8;
let generated: Vec<u8> = vec![base + 1, base + 2, base + 3];
generated
}
});
create_test_block(&header)
}
fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes {
create_test_block(&create_unverifiable_block_header(order, parent_hash))
}
pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransaction], uncles: &[Header]) -> Bytes {
let mut rlp = RlpStream::new_list(3);
rlp.append(header);
rlp.begin_list(transactions.len());
for t in transactions {
rlp.append_raw(&rlp::encode(t).into_vec(), 1);
}
rlp.append_list(&uncles);
rlp.out()
}
pub fn generate_dummy_client(block_number: u32) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[])
}
pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices)
}
pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices)
}
pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[])
}
pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec_and_accounts(&test_spec, accounts)),
IoChannel::disconnected(),
).unwrap();
let test_engine = &*test_spec.engine;
let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let genesis_header = test_spec.genesis_header();
let mut rolling_timestamp = 40;
let mut last_hashes = vec![];
let mut last_header = genesis_header.clone();
let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap();
let author = kp.address();
let mut n = 0;
for _ in 0..block_number {
last_hashes.push(last_header.hash());
// forge block.
let mut b = OpenBlock::new(
test_engine,
Default::default(),
false,
db,
&last_header,
Arc::new(last_hashes.clone()),
author.clone(),
(3141562.into(), 31415620.into()),
vec![],
false,
).unwrap();
b.set_difficulty(U256::from(0x20000));
rolling_timestamp += 10;
b.set_timestamp(rolling_timestamp);
// first block we don't have any balance, so can't send any transactions.
for _ in 0..txs_per_block {
b.push_transaction(Transaction {
nonce: n.into(),
gas_price: tx_gas_prices[n % tx_gas_prices.len()],
gas: 100000.into(),
action: Action::Create,
data: vec![],
value: U256::zero(),
}.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
n += 1;
}
let b = b.close_and_lock().seal(test_engine, vec![]).unwrap();
if let Err(e) = client.import_block(b.rlp_bytes()) {
panic!("error importing block which is valid by definition: {:?}", e);
}
last_header = BlockView::new(&b.rlp_bytes()).header();
db = b.drain();
}
client.flush_queue();
client.import_verified_blocks();
client
}
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
//let test_engine = test_spec.to_engine().unwrap();
let state_root = test_spec.genesis_header().state_root().clone();
let mut rolling_hash = client.chain_info().best_block_hash;
let mut rolling_block_number = starting_number as u64;
let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
for _ in 0..block_number {
let mut header = Header::new();
header.set_gas_limit(test_engine.params().min_gas_limit);
header.set_difficulty(U256::from(0x20000));
header.set_timestamp(rolling_timestamp);
header.set_number(rolling_block_number);
header.set_parent_hash(rolling_hash);
header.set_state_root(state_root);
rolling_hash = header.hash();
rolling_block_number = rolling_block_number + 1;
rolling_timestamp = rolling_timestamp + 10;
if let Err(e) = client.import_block(create_test_block(&header)) {
panic!("error importing block which is valid by definition: {:?}", e);
}
}
}
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec(&test_spec)),
IoChannel::disconnected(),
).unwrap();
for block in &blocks {
if client.import_block(block.clone()).is_err() |
}
client.flush_queue();
client.import_verified_blocks();
client
}
fn new_db() -> Arc<KeyValueDB> {
Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
}
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_empty_blockchain() -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
bc
}
pub fn get_temp_state() -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
State::new(journal_db, U256::from(0), Default::default())
}
pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
let mut factories = Factories::default();
factories.vm = factory;
State::new(journal_db, U256::from(0), factories)
}
pub fn get_temp_state_db() -> StateDB {
let db = new_db();
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec();
get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash())
}
pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
let mut rolling_timestamp = start_number as u64 * 10;
let mut parent = *parent_hash;
let mut r = Vec::new();
for i in start_number.. start_number + count + 1 {
let mut block_header = Header::new();
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(i).mul(U256([0, 1, 0, 0])));
block_header.set_timestamp(rolling_timestamp);
block_header.set_number(i as u64);
block_header.set_parent_hash(parent);
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
parent = block_header.hash();
rolling_timestamp = rolling_timestamp + 10;
r.push(create_test_block(&block_header));
}
r
}
pub fn get_good_dummy_block_hash() -> (H256, Bytes) {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
(block_header.hash(), create_test_block(&block_header))
}
pub fn get_good_dummy_block() -> Bytes {
let (_, bytes) = get_good_dummy_block_hash();
bytes
}
pub fn get_bad_state_dummy_block() -> Bytes {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(0xbad.into());
create_test_block(&block_header)
}
pub fn get_default_ethash_params() -> EthashParams{
EthashParams {
gas_limit_bound_divisor: U256::from(1024),
minimum_difficulty: U256::from(131072),
difficulty_bound_divisor: U256::from(2048),
difficulty_increment_divisor: 10,
metropolis_difficulty_increment_divisor: 9,
duration_limit: 13,
block_reward: U256::from(0),
registrar: "0000000000000000000000000000000000000001".into(),
homestead_transition: 1150000,
dao_hardfork_transition: u64::max_value(),
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(),
dao_hardfork_accounts: vec![],
difficulty_hardfork_transition: u64::max_value(),
difficulty_hardfork_bound_divisor: U256::from(0),
bomb_defuse_transition: u64::max_value(),
eip100b_transition: u64::max_value(),
eip150_transition: u64::max_value(),
eip155_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
ecip1010_pause_transition: u64::max_value(),
ecip1010_continue_transition: u64::max_value(),
ecip1017_era_rounds: u64::max_value(),
max_code_size: u64::max_value(),
max_gas_limit_transition: u64::max_value(),
max_gas_limit: U256::max_value(),
min_gas_price_transition: u64::max_value(),
min_gas_price: U256::zero(),
}
}
| {
panic!("panic importing block which is well-formed");
} | conditional_block |
helpers.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethkey::KeyPair;
use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use util::*;
use spec::*;
use account_provider::AccountProvider;
use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use builtin::Builtin;
use state::*;
use evm::{Schedule, Factory as EvmFactory};
use factory::Factories;
use engines::Engine;
use ethereum;
use ethereum::ethash::EthashParams;
use miner::Miner;
use header::Header;
use transaction::{Action, Transaction, SignedTransaction};
use rlp::{self, RlpStream};
use views::BlockView;
#[cfg(feature = "json-tests")]
/// Fork eras used by the JSON consensus tests to select which chain
/// rules a fixture should be validated under.
pub enum ChainEra {
Frontier,
Homestead,
Eip150,
// Underscore prefix: currently unused variant, kept for completeness.
_Eip161,
TransitionTest,
}
/// Engine wrapper for tests: delegates to a real engine but overrides
/// the EVM schedule's `max_depth` (see the `Engine` impl below).
pub struct TestEngine {
// Underlying engine all other behaviour is delegated to.
engine: Arc<Engine>,
// Value substituted into the schedule's `max_depth`.
max_depth: usize,
}
impl TestEngine {
	/// Frontier-rules engine whose EVM call depth is capped at `max_depth`.
	pub fn new(max_depth: usize) -> TestEngine {
		let engine = ethereum::new_frontier_test().engine;
		TestEngine { engine: engine, max_depth: max_depth }
	}
	/// Metropolis-rules engine with a call-depth cap of zero.
	pub fn new_metropolis() -> TestEngine {
		let engine = ethereum::new_metropolis_test().engine;
		TestEngine { engine: engine, max_depth: 0 }
	}
}
/// Delegates everything to the wrapped engine except `schedule`, where
/// the EVM call-depth limit is overridden with `self.max_depth`.
impl Engine for TestEngine {
fn name(&self) -> &str {
"TestEngine"
}
fn params(&self) -> &CommonParams {
self.engine.params()
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
self.engine.builtins()
}
fn schedule(&self, _block_number: u64) -> Schedule {
// Take the wrapped engine's schedule at block 0 and cap the call depth.
let mut schedule = self.engine.schedule(0);
schedule.max_depth = self.max_depth;
schedule
}
}
// TODO: move everything over to get_null_spec.
/// The standard test chain spec used by most helpers in this file.
pub fn get_test_spec() -> Spec {
Spec::new_test()
}
/// RLP-encode a block consisting of `header`, no transactions and no uncles.
pub fn create_test_block(header: &Header) -> Bytes {
	let mut stream = RlpStream::new_list(3);
	stream.append(header);
	// Empty transaction list, then empty uncle list.
	stream.append_raw(&rlp::EMPTY_LIST_RLP, 1);
	stream.append_raw(&rlp::EMPTY_LIST_RLP, 1);
	stream.out()
}
/// Build a header for block number `order` with fields derived from `order`.
/// The state root is zeroed, so the resulting block cannot be fully verified.
fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header {
	let mut h = Header::new();
	h.set_gas_limit(0.into());
	h.set_difficulty((order * 100).into());
	h.set_timestamp((order * 10) as u64);
	h.set_number(order as u64);
	h.set_parent_hash(parent_hash);
	h.set_state_root(H256::zero());
	h
}
/// Encode an unverifiable test block for `order`, attaching `extra` as the
/// header's extra-data; when `extra` is `None`, a deterministic three-byte
/// payload derived from `order` is generated instead.
fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option<Bytes>) -> Bytes {
	let mut header = create_unverifiable_block_header(order, parent_hash);
	header.set_extra_data(extra.unwrap_or_else(|| {
		// Derive three bytes from the low byte of `order`. `wrapping_add`
		// avoids the debug-build overflow panic the original `base + 1/2/3`
		// hit when the low byte is 253..=255.
		let base = (order & 0x000000ff) as u8;
		vec![base.wrapping_add(1), base.wrapping_add(2), base.wrapping_add(3)]
	}));
	create_test_block(&header)
}
/// Encode an unverifiable test block (zeroed state root) for `order`.
fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes {
create_test_block(&create_unverifiable_block_header(order, parent_hash))
}
/// RLP-encode a block from `header`, the given `transactions` and `uncles`.
pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransaction], uncles: &[Header]) -> Bytes {
	let mut stream = RlpStream::new_list(3);
	stream.append(header);
	// Transactions are appended as pre-encoded raw items.
	stream.begin_list(transactions.len());
	for tx in transactions {
		stream.append_raw(&rlp::encode(tx).into_vec(), 1);
	}
	stream.append_list(&uncles);
	stream.out()
}
/// Client with `block_number` empty blocks on the standard test spec.
pub fn generate_dummy_client(block_number: u32) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[])
}
/// Client on the null spec with `txs_per_block` dummy transactions per
/// block, priced by cycling through `tx_gas_prices`.
pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices)
}
/// Client on a caller-supplied spec, with no account provider.
pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices)
}
/// Client on a caller-supplied spec and account provider, with no blocks.
pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[])
}
/// Build an in-memory client for the spec produced by `get_test_spec`,
/// author `block_number` sealed blocks on top of genesis (each carrying
/// `txs_per_block` `Action::Create` transactions priced by cycling
/// `tx_gas_prices`), import them, and return the client.
/// NOTE(review): panics if `txs_per_block > 0` and `tx_gas_prices` is
/// empty (`n % 0` below).
pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec_and_accounts(&test_spec, accounts)),
IoChannel::disconnected(),
).unwrap();
let test_engine = &*test_spec.engine;
// Separate state DB seeded with the genesis state; drained from each
// sealed block and reused for the next one below.
let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let genesis_header = test_spec.genesis_header();
let mut rolling_timestamp = 40;
let mut last_hashes = vec![];
let mut last_header = genesis_header.clone();
// Deterministic author key: secret is the SHA3 of the empty string.
let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap();
let author = kp.address();
// Global nonce counter across all generated transactions.
let mut n = 0;
for _ in 0..block_number {
last_hashes.push(last_header.hash());
// forge block.
let mut b = OpenBlock::new(
test_engine,
Default::default(),
false,
db,
&last_header,
Arc::new(last_hashes.clone()),
author.clone(),
(3141562.into(), 31415620.into()),
vec![],
false,
).unwrap();
b.set_difficulty(U256::from(0x20000));
rolling_timestamp += 10;
b.set_timestamp(rolling_timestamp);
// first block we don't have any balance, so can't send any transactions.
for _ in 0..txs_per_block {
b.push_transaction(Transaction {
nonce: n.into(),
gas_price: tx_gas_prices[n % tx_gas_prices.len()],
gas: 100000.into(),
action: Action::Create,
data: vec![],
value: U256::zero(),
}.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
n += 1;
}
let b = b.close_and_lock().seal(test_engine, vec![]).unwrap();
if let Err(e) = client.import_block(b.rlp_bytes()) {
panic!("error importing block which is valid by definition: {:?}", e);
}
last_header = BlockView::new(&b.rlp_bytes()).header();
db = b.drain();
}
client.flush_queue();
client.import_verified_blocks();
client
}
/// Forge `block_number` headers on top of the client's current best block
/// and import them; `timestamp_salt` offsets the timestamps so distinct
/// calls produce distinct chains.
/// NOTE(review): every header reuses `get_test_spec()`'s genesis state
/// root regardless of the client's actual state — confirm intended.
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
//let test_engine = test_spec.to_engine().unwrap();
let state_root = test_spec.genesis_header().state_root().clone();
let mut rolling_hash = client.chain_info().best_block_hash;
let mut rolling_block_number = starting_number as u64;
let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
for _ in 0..block_number {
let mut header = Header::new();
header.set_gas_limit(test_engine.params().min_gas_limit);
header.set_difficulty(U256::from(0x20000));
header.set_timestamp(rolling_timestamp);
header.set_number(rolling_block_number);
header.set_parent_hash(rolling_hash);
header.set_state_root(state_root);
// Chain the next header onto this one.
rolling_hash = header.hash();
rolling_block_number = rolling_block_number + 1;
rolling_timestamp = rolling_timestamp + 10;
if let Err(e) = client.import_block(create_test_block(&header)) {
panic!("error importing block which is valid by definition: {:?}", e);
}
}
}
/// Build a client backed by an in-memory DB, import the given raw `blocks`
/// (panicking if any import fails), flush the queue and return the client.
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
	let test_spec = get_test_spec();
	let client_db = new_db();
	let client = Client::new(
		ClientConfig::default(),
		&test_spec,
		client_db,
		Arc::new(Miner::with_spec(&test_spec)),
		IoChannel::disconnected(),
	).unwrap();
	for block in &blocks {
		// Surface the actual import error in the panic message, matching
		// the `if let Err(e)` style of the other import helpers here.
		if let Err(e) = client.import_block(block.clone()) {
			panic!("panic importing block which is well-formed: {:?}", e);
		}
	}
	client.flush_queue();
	client.import_verified_blocks();
	client
}
/// Fresh in-memory key-value database with the standard column count.
fn new_db() -> Arc<KeyValueDB> {
Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
}
/// Blockchain of unverifiable blocks 1..block_number on top of an
/// unverifiable genesis, on a fresh in-memory database.
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
// NOTE(review): commit() runs each iteration but the batch is only
// written to the DB once after the loop — confirm this ordering is
// intended (relies on the chain's in-memory caching).
bc.commit();
}
db.write(batch).unwrap();
bc
}
/// Like `generate_dummy_blockchain`, but each non-genesis block carries
/// generated extra-data in its header.
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
// NOTE(review): commit() before the single db.write(batch) below —
// same ordering question as generate_dummy_blockchain.
bc.commit();
}
db.write(batch).unwrap();
bc
}
/// Blockchain containing only the unverifiable genesis block, on a fresh
/// in-memory database.
pub fn generate_dummy_empty_blockchain() -> BlockChain {
	let db = new_db();
	// Return the chain directly instead of binding it first (clippy::let_and_return).
	BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone())
}
pub fn | () -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
State::new(journal_db, U256::from(0), Default::default())
}
/// Fresh `State` using the supplied EVM factory; all other factories default.
pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> {
	let mut factories = Factories::default();
	factories.vm = factory;
	State::new(get_temp_state_db(), U256::from(0), factories)
}
/// Fresh `StateDB` over an in-memory key-value store using the
/// early-merge journal strategy and a 5 MiB cache.
pub fn get_temp_state_db() -> StateDB {
let db = new_db();
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
/// Sequence of valid-looking encoded blocks built on the test-spec
/// genesis, starting at block number 1.
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec();
get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash())
}
/// Generate a chain of encoded valid-looking blocks starting at
/// `start_number` on top of `parent_hash`; all blocks reuse the test-spec
/// genesis state root.
/// NOTE(review): the loop bound `start_number + count + 1` produces
/// `count + 1` blocks, not `count` — possible off-by-one, but callers may
/// rely on it; confirm before changing.
pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
let mut rolling_timestamp = start_number as u64 * 10;
let mut parent = *parent_hash;
let mut r = Vec::new();
for i in start_number.. start_number + count + 1 {
let mut block_header = Header::new();
block_header.set_gas_limit(test_engine.params().min_gas_limit);
// Difficulty i << 64 (U256 limb 1 set), so it grows with block number.
block_header.set_difficulty(U256::from(i).mul(U256([0, 1, 0, 0])));
block_header.set_timestamp(rolling_timestamp);
block_header.set_number(i as u64);
block_header.set_parent_hash(parent);
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
// Chain the next block onto this one.
parent = block_header.hash();
rolling_timestamp = rolling_timestamp + 10;
r.push(create_test_block(&block_header));
}
r
}
pub fn get_good_dummy_block_hash() -> (H256, Bytes) {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
(block_header.hash(), create_test_block(&block_header))
}
pub fn get_good_dummy_block() -> Bytes {
let (_, bytes) = get_good_dummy_block_hash();
bytes
}
pub fn get_bad_state_dummy_block() -> Bytes {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(0xbad.into());
create_test_block(&block_header)
}
pub fn get_default_ethash_params() -> EthashParams{
EthashParams {
gas_limit_bound_divisor: U256::from(1024),
minimum_difficulty: U256::from(131072),
difficulty_bound_divisor: U256::from(2048),
difficulty_increment_divisor: 10,
metropolis_difficulty_increment_divisor: 9,
duration_limit: 13,
block_reward: U256::from(0),
registrar: "0000000000000000000000000000000000000001".into(),
homestead_transition: 1150000,
dao_hardfork_transition: u64::max_value(),
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(),
dao_hardfork_accounts: vec![],
difficulty_hardfork_transition: u64::max_value(),
difficulty_hardfork_bound_divisor: U256::from(0),
bomb_defuse_transition: u64::max_value(),
eip100b_transition: u64::max_value(),
eip150_transition: u64::max_value(),
eip155_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
ecip1010_pause_transition: u64::max_value(),
ecip1010_continue_transition: u64::max_value(),
ecip1017_era_rounds: u64::max_value(),
max_code_size: u64::max_value(),
max_gas_limit_transition: u64::max_value(),
max_gas_limit: U256::max_value(),
min_gas_price_transition: u64::max_value(),
min_gas_price: U256::zero(),
}
}
| get_temp_state | identifier_name |
helpers.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethkey::KeyPair;
use io::*;
use client::{BlockChainClient, Client, ClientConfig};
use util::*;
use spec::*;
use account_provider::AccountProvider;
use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use builtin::Builtin;
use state::*;
use evm::{Schedule, Factory as EvmFactory};
use factory::Factories;
use engines::Engine;
use ethereum;
use ethereum::ethash::EthashParams;
use miner::Miner;
use header::Header;
use transaction::{Action, Transaction, SignedTransaction};
use rlp::{self, RlpStream};
use views::BlockView;
#[cfg(feature = "json-tests")]
pub enum ChainEra {
Frontier,
Homestead,
Eip150,
_Eip161,
TransitionTest,
}
pub struct TestEngine {
engine: Arc<Engine>,
max_depth: usize,
}
impl TestEngine {
pub fn new(max_depth: usize) -> TestEngine {
TestEngine {
engine: ethereum::new_frontier_test().engine,
max_depth: max_depth,
}
}
pub fn new_metropolis() -> TestEngine {
TestEngine {
engine: ethereum::new_metropolis_test().engine,
max_depth: 0,
}
}
}
impl Engine for TestEngine {
fn name(&self) -> &str {
"TestEngine"
}
fn params(&self) -> &CommonParams {
self.engine.params()
}
fn builtins(&self) -> &BTreeMap<Address, Builtin> {
self.engine.builtins()
}
fn schedule(&self, _block_number: u64) -> Schedule {
let mut schedule = self.engine.schedule(0);
schedule.max_depth = self.max_depth;
schedule
}
}
// TODO: move everything over to get_null_spec.
pub fn get_test_spec() -> Spec {
Spec::new_test()
}
pub fn create_test_block(header: &Header) -> Bytes {
let mut rlp = RlpStream::new_list(3);
rlp.append(header);
rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
rlp.out()
}
fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header {
let mut header = Header::new();
header.set_gas_limit(0.into());
header.set_difficulty((order * 100).into());
header.set_timestamp((order * 10) as u64);
header.set_number(order as u64);
header.set_parent_hash(parent_hash);
header.set_state_root(H256::zero());
header
}
fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option<Bytes>) -> Bytes {
let mut header = create_unverifiable_block_header(order, parent_hash);
header.set_extra_data(match extra {
Some(extra_data) => extra_data,
None => {
let base = (order & 0x000000ff) as u8;
let generated: Vec<u8> = vec![base + 1, base + 2, base + 3];
generated
}
});
create_test_block(&header)
}
fn create_unverifiable_block(order: u32, parent_hash: H256) -> Bytes {
create_test_block(&create_unverifiable_block_header(order, parent_hash))
}
pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransaction], uncles: &[Header]) -> Bytes {
let mut rlp = RlpStream::new_list(3);
rlp.append(header);
rlp.begin_list(transactions.len());
for t in transactions {
rlp.append_raw(&rlp::encode(t).into_vec(), 1);
}
rlp.append_list(&uncles);
rlp.out()
}
pub fn generate_dummy_client(block_number: u32) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[])
}
pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices)
}
pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices)
}
pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[])
}
pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec_and_accounts(&test_spec, accounts)),
IoChannel::disconnected(),
).unwrap();
let test_engine = &*test_spec.engine;
let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let genesis_header = test_spec.genesis_header();
let mut rolling_timestamp = 40;
let mut last_hashes = vec![];
let mut last_header = genesis_header.clone();
let kp = KeyPair::from_secret_slice(&"".sha3()).unwrap();
let author = kp.address();
let mut n = 0;
for _ in 0..block_number {
last_hashes.push(last_header.hash());
// forge block.
let mut b = OpenBlock::new(
test_engine,
Default::default(),
false,
db,
&last_header,
Arc::new(last_hashes.clone()),
author.clone(),
(3141562.into(), 31415620.into()),
vec![],
false,
).unwrap();
b.set_difficulty(U256::from(0x20000));
rolling_timestamp += 10;
b.set_timestamp(rolling_timestamp);
// first block we don't have any balance, so can't send any transactions.
for _ in 0..txs_per_block {
b.push_transaction(Transaction {
nonce: n.into(),
gas_price: tx_gas_prices[n % tx_gas_prices.len()],
gas: 100000.into(),
action: Action::Create,
data: vec![],
value: U256::zero(),
}.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
n += 1;
}
let b = b.close_and_lock().seal(test_engine, vec![]).unwrap();
if let Err(e) = client.import_block(b.rlp_bytes()) {
panic!("error importing block which is valid by definition: {:?}", e);
}
last_header = BlockView::new(&b.rlp_bytes()).header();
db = b.drain();
}
client.flush_queue();
client.import_verified_blocks();
client
}
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
//let test_engine = test_spec.to_engine().unwrap();
let state_root = test_spec.genesis_header().state_root().clone();
let mut rolling_hash = client.chain_info().best_block_hash;
let mut rolling_block_number = starting_number as u64;
let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10;
for _ in 0..block_number {
let mut header = Header::new();
header.set_gas_limit(test_engine.params().min_gas_limit);
header.set_difficulty(U256::from(0x20000));
header.set_timestamp(rolling_timestamp);
header.set_number(rolling_block_number);
header.set_parent_hash(rolling_hash);
header.set_state_root(state_root);
rolling_hash = header.hash();
rolling_block_number = rolling_block_number + 1;
rolling_timestamp = rolling_timestamp + 10;
if let Err(e) = client.import_block(create_test_block(&header)) {
panic!("error importing block which is valid by definition: {:?}", e);
}
}
}
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
let test_spec = get_test_spec();
let client_db = new_db();
let client = Client::new(
ClientConfig::default(),
&test_spec,
client_db,
Arc::new(Miner::with_spec(&test_spec)),
IoChannel::disconnected(),
).unwrap();
for block in &blocks {
if client.import_block(block.clone()).is_err() {
panic!("panic importing block which is well-formed");
}
}
client.flush_queue();
client.import_verified_blocks();
client
}
fn new_db() -> Arc<KeyValueDB> {
Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
}
pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction();
for block_order in 1..block_number {
bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
bc.commit();
}
db.write(batch).unwrap();
bc
}
pub fn generate_dummy_empty_blockchain() -> BlockChain |
pub fn get_temp_state() -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
State::new(journal_db, U256::from(0), Default::default())
}
pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db();
let mut factories = Factories::default();
factories.vm = factory;
State::new(journal_db, U256::from(0), factories)
}
pub fn get_temp_state_db() -> StateDB {
let db = new_db();
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec();
get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash())
}
pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_hash: &H256) -> Vec<Bytes> {
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
let mut rolling_timestamp = start_number as u64 * 10;
let mut parent = *parent_hash;
let mut r = Vec::new();
for i in start_number.. start_number + count + 1 {
let mut block_header = Header::new();
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(i).mul(U256([0, 1, 0, 0])));
block_header.set_timestamp(rolling_timestamp);
block_header.set_number(i as u64);
block_header.set_parent_hash(parent);
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
parent = block_header.hash();
rolling_timestamp = rolling_timestamp + 10;
r.push(create_test_block(&block_header));
}
r
}
pub fn get_good_dummy_block_hash() -> (H256, Bytes) {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(test_spec.genesis_header().state_root().clone());
(block_header.hash(), create_test_block(&block_header))
}
pub fn get_good_dummy_block() -> Bytes {
let (_, bytes) = get_good_dummy_block_hash();
bytes
}
pub fn get_bad_state_dummy_block() -> Bytes {
let mut block_header = Header::new();
let test_spec = get_test_spec();
let test_engine = &test_spec.engine;
block_header.set_gas_limit(test_engine.params().min_gas_limit);
block_header.set_difficulty(U256::from(0x20000));
block_header.set_timestamp(40);
block_header.set_number(1);
block_header.set_parent_hash(test_spec.genesis_header().hash());
block_header.set_state_root(0xbad.into());
create_test_block(&block_header)
}
pub fn get_default_ethash_params() -> EthashParams{
EthashParams {
gas_limit_bound_divisor: U256::from(1024),
minimum_difficulty: U256::from(131072),
difficulty_bound_divisor: U256::from(2048),
difficulty_increment_divisor: 10,
metropolis_difficulty_increment_divisor: 9,
duration_limit: 13,
block_reward: U256::from(0),
registrar: "0000000000000000000000000000000000000001".into(),
homestead_transition: 1150000,
dao_hardfork_transition: u64::max_value(),
dao_hardfork_beneficiary: "0000000000000000000000000000000000000001".into(),
dao_hardfork_accounts: vec![],
difficulty_hardfork_transition: u64::max_value(),
difficulty_hardfork_bound_divisor: U256::from(0),
bomb_defuse_transition: u64::max_value(),
eip100b_transition: u64::max_value(),
eip150_transition: u64::max_value(),
eip155_transition: u64::max_value(),
eip160_transition: u64::max_value(),
eip161abc_transition: u64::max_value(),
eip161d_transition: u64::max_value(),
ecip1010_pause_transition: u64::max_value(),
ecip1010_continue_transition: u64::max_value(),
ecip1017_era_rounds: u64::max_value(),
max_code_size: u64::max_value(),
max_gas_limit_transition: u64::max_value(),
max_gas_limit: U256::max_value(),
min_gas_price_transition: u64::max_value(),
min_gas_price: U256::zero(),
}
}
| {
let db = new_db();
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
bc
} | identifier_body |
shift-various-bad-types.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can do shifts by any integral type.
struct Panolpy {
char: char,
str: &'static str,
}
fn | (p: &Panolpy) {
22 >> p.char;
//~^ ERROR E0277
//~| ERROR E0277
22 >> p.str;
//~^ ERROR E0277
//~| ERROR E0277
22 >> p;
//~^ ERROR E0277
//~| ERROR E0277
let x;
22 >> x; // ambiguity error winds up being suppressed
22 >> 1;
// Integer literal types are OK
// Type of the result follows the LHS, not the RHS:
let _: i32 = 22_i64 >> 1_i32;
//~^ ERROR mismatched types
//~| expected `i32`
//~| found `i64`
//~| expected i32
//~| found i64)
}
fn main() {
}
| foo | identifier_name |
shift-various-bad-types.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we can do shifts by any integral type.
struct Panolpy {
char: char,
str: &'static str,
}
fn foo(p: &Panolpy) {
22 >> p.char;
//~^ ERROR E0277
//~| ERROR E0277
22 >> p.str;
//~^ ERROR E0277
//~| ERROR E0277
22 >> p;
//~^ ERROR E0277
//~| ERROR E0277
let x;
22 >> x; // ambiguity error winds up being suppressed
22 >> 1;
// Integer literal types are OK
// Type of the result follows the LHS, not the RHS:
let _: i32 = 22_i64 >> 1_i32;
//~^ ERROR mismatched types
//~| expected `i32`
//~| found `i64`
//~| expected i32 | }
fn main() {
} | //~| found i64) | random_line_split |
FeatherFilter.rs | #pragma version(1)
#pragma rs java_package_name(cn.louispeng.imagefilter.renderscript)
// 羽化效果
#include "Clamp.rsh"
// set from the java SDK level
rs_allocation gIn;
rs_allocation gOut;
rs_script gScript;
// magic factor
const static float _Size = 0.5f;
// static variables
static uint32_t _width;
static uint32_t _height;
static float _ratio; | static uint32_t _max;
static uint32_t _min;
static uint32_t _diff;
static void setup() {
_width = rsAllocationGetDimX(gIn);
_height = rsAllocationGetDimY(gIn);
_ratio = (_width > _height)? ((float)_height / _width) : ((float)_width / _height);
_centerX = _width >> 1;
_centerY = _height >> 1;
_max = _centerX * _centerX + _centerY * _centerY;
_min = _max * (1 - _Size);
_diff = _max - _min;
}
void filter() {
setup();
rsForEach(gScript, gIn, gOut, 0, 0); // for each element of the input allocation,
// call root() method on gScript
}
void root(const uchar4 *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) {
float4 f4 = rsUnpackColor8888(*v_in); // extract RGBA values, see rs_core.rsh
// Calculate distance to center and adapt aspect ratio
int32_t distanceX = _centerX - x;
int32_t distanceY = _centerY - y;
if (_width > _height){
distanceX = distanceX * _ratio;
} else {
distanceY = distanceY * _ratio;
}
uint32_t distSq = distanceX * distanceX + distanceY * distanceY;
float v = (float)distSq / _diff;
float3 f3 = f4.rgb + v;
f3 = FClamp01Float3(f3);
*v_out = rsPackColorTo8888(f3);
} | static uint32_t _centerX;
static uint32_t _centerY; | random_line_split |
FeatherFilter.rs | #pragma version(1)
#pragma rs java_package_name(cn.louispeng.imagefilter.renderscript)
// 羽化效果
#include "Clamp.rsh"
// set from the java SDK level
rs_allocation gIn;
rs_allocation gOut;
rs_script gScript;
// magic factor
const static float _Size = 0.5f;
// static variables
static uint32_t _width;
static uint32_t _height;
static float _ratio;
static uint32_t _centerX;
static uint32_t _centerY;
static uint32_t _max;
static uint32_t _min;
static uint32_t _diff;
static void setup() {
_width = rsAllocationGetDimX(gIn);
_height = rsAllocationGetDimY(gIn);
_ratio = (_width > _height)? ((float)_height / _width) : ((float)_width / _height);
_centerX = _width >> 1;
_centerY = _height >> 1;
_max = _centerX * _centerX + _centerY * _centerY;
_min = _max * (1 - _Size);
_diff = _max - _min;
}
void filter() {
setup();
rsForEach(gScript, gIn, gOut, 0, 0); // for each element of the input allocation,
// call root() method on gScript
}
void root(const uchar4 *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) {
float4 f4 = rsUnpackColor8888(*v_in); // extract RGBA values, see rs_core.rsh
// Calculate distance to center and adapt aspect ratio
int32_t distanceX = _centerX - x;
int32_t distanceY = _centerY - y;
if (_width > _height){
dist | distanceY = distanceY * _ratio;
}
uint32_t distSq = distanceX * distanceX + distanceY * distanceY;
float v = (float)distSq / _diff;
float3 f3 = f4.rgb + v;
f3 = FClamp01Float3(f3);
*v_out = rsPackColorTo8888(f3);
} | anceX = distanceX * _ratio;
} else {
| conditional_block |
build.rs | use std::env;
fn main() {
let target = env::var("TARGET").unwrap_or("".to_string());
let tundra_dir = env::var("TUNDRA_OBJECTDIR").unwrap_or("".to_string());
let libs = env::var("TUNDRA_STATIC_LIBS").unwrap_or("".to_string());
let native_libs = libs.split(" ");
println!("cargo:rustc-link-search=native={}", tundra_dir);
for lib in native_libs {
println!("cargo:rustc-link-lib=static={}", lib);
println!("cargo:rerun-if-changed={}", lib);
}
if target.contains("darwin") | else if target.contains("windows") {
} else {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l dylib=X11");
println!("cargo:rustc-flags=-l dylib=GL");
println!("cargo:rustc-flags=-l dylib=dl");
}
}
| {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l framework=Cocoa");
println!("cargo:rustc-flags=-l framework=Metal");
println!("cargo:rustc-flags=-l framework=OpenGL");
println!("cargo:rustc-flags=-l framework=QuartzCore");
} | conditional_block |
build.rs | use std::env;
fn main() | } else if target.contains("windows") {
} else {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l dylib=X11");
println!("cargo:rustc-flags=-l dylib=GL");
println!("cargo:rustc-flags=-l dylib=dl");
}
}
| {
let target = env::var("TARGET").unwrap_or("".to_string());
let tundra_dir = env::var("TUNDRA_OBJECTDIR").unwrap_or("".to_string());
let libs = env::var("TUNDRA_STATIC_LIBS").unwrap_or("".to_string());
let native_libs = libs.split(" ");
println!("cargo:rustc-link-search=native={}", tundra_dir);
for lib in native_libs {
println!("cargo:rustc-link-lib=static={}", lib);
println!("cargo:rerun-if-changed={}", lib);
}
if target.contains("darwin") {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l framework=Cocoa");
println!("cargo:rustc-flags=-l framework=Metal");
println!("cargo:rustc-flags=-l framework=OpenGL");
println!("cargo:rustc-flags=-l framework=QuartzCore"); | identifier_body |
build.rs | use std::env;
fn main() {
let target = env::var("TARGET").unwrap_or("".to_string());
let tundra_dir = env::var("TUNDRA_OBJECTDIR").unwrap_or("".to_string());
let libs = env::var("TUNDRA_STATIC_LIBS").unwrap_or("".to_string());
let native_libs = libs.split(" ");
|
for lib in native_libs {
println!("cargo:rustc-link-lib=static={}", lib);
println!("cargo:rerun-if-changed={}", lib);
}
if target.contains("darwin") {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l framework=Cocoa");
println!("cargo:rustc-flags=-l framework=Metal");
println!("cargo:rustc-flags=-l framework=OpenGL");
println!("cargo:rustc-flags=-l framework=QuartzCore");
} else if target.contains("windows") {
} else {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l dylib=X11");
println!("cargo:rustc-flags=-l dylib=GL");
println!("cargo:rustc-flags=-l dylib=dl");
}
} | println!("cargo:rustc-link-search=native={}", tundra_dir); | random_line_split |
build.rs | use std::env;
fn | () {
let target = env::var("TARGET").unwrap_or("".to_string());
let tundra_dir = env::var("TUNDRA_OBJECTDIR").unwrap_or("".to_string());
let libs = env::var("TUNDRA_STATIC_LIBS").unwrap_or("".to_string());
let native_libs = libs.split(" ");
println!("cargo:rustc-link-search=native={}", tundra_dir);
for lib in native_libs {
println!("cargo:rustc-link-lib=static={}", lib);
println!("cargo:rerun-if-changed={}", lib);
}
if target.contains("darwin") {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l framework=Cocoa");
println!("cargo:rustc-flags=-l framework=Metal");
println!("cargo:rustc-flags=-l framework=OpenGL");
println!("cargo:rustc-flags=-l framework=QuartzCore");
} else if target.contains("windows") {
} else {
println!("cargo:rustc-flags=-l dylib=stdc++");
println!("cargo:rustc-flags=-l dylib=X11");
println!("cargo:rustc-flags=-l dylib=GL");
println!("cargo:rustc-flags=-l dylib=dl");
}
}
| main | identifier_name |
lex_unique_vecs_length_inclusive_range.rs | use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::lex_unique_vecs_length_inclusive_range;
use std::fmt::Debug;
fn lex_unique_vecs_length_inclusive_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
{
let xss = lex_unique_vecs_length_inclusive_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
#[test]
fn test_lex_unique_vecs_length_inclusive_range() {
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, nevers(), 1, &[&[]]);
lex_unique_vecs_length_inclusive_range_small_helper(6, 9, nevers(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, exhaustive_units(), 2, &[&[], &[()]]);
lex_unique_vecs_length_inclusive_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(
0,
1,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
lex_unique_vecs_length_inclusive_range_small_helper(
2,
3,
exhaustive_bools(),
2, | 1,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
} | &[&[false, true], &[true, false]],
);
lex_unique_vecs_length_inclusive_range_small_helper(
1, | random_line_split |
lex_unique_vecs_length_inclusive_range.rs | use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::lex_unique_vecs_length_inclusive_range;
use std::fmt::Debug;
fn lex_unique_vecs_length_inclusive_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
{
let xss = lex_unique_vecs_length_inclusive_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
#[test]
fn test_lex_unique_vecs_length_inclusive_range() | 1,
1,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
}
| {
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, nevers(), 1, &[&[]]);
lex_unique_vecs_length_inclusive_range_small_helper(6, 9, nevers(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, exhaustive_units(), 2, &[&[], &[()]]);
lex_unique_vecs_length_inclusive_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(
0,
1,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
lex_unique_vecs_length_inclusive_range_small_helper(
2,
3,
exhaustive_bools(),
2,
&[&[false, true], &[true, false]],
);
lex_unique_vecs_length_inclusive_range_small_helper( | identifier_body |
lex_unique_vecs_length_inclusive_range.rs | use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::lex_unique_vecs_length_inclusive_range;
use std::fmt::Debug;
fn lex_unique_vecs_length_inclusive_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
{
let xss = lex_unique_vecs_length_inclusive_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
#[test]
fn | () {
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, nevers(), 1, &[&[]]);
lex_unique_vecs_length_inclusive_range_small_helper(6, 9, nevers(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(0, 4, exhaustive_units(), 2, &[&[], &[()]]);
lex_unique_vecs_length_inclusive_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
lex_unique_vecs_length_inclusive_range_small_helper(
0,
1,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
lex_unique_vecs_length_inclusive_range_small_helper(
2,
3,
exhaustive_bools(),
2,
&[&[false, true], &[true, false]],
);
lex_unique_vecs_length_inclusive_range_small_helper(
1,
1,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
}
| test_lex_unique_vecs_length_inclusive_range | identifier_name |
time.rs | // Copyright 2015 juggle-tux
//
// This file is part of srttool.
//
// srttool is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// srttool is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with srttool. If not, see <http://www.gnu.org/licenses/>.
//
use std::cmp::Eq;
use std::convert::From;
use std::error::Error;
use std::fmt::{self, Display};
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::time::Duration;
use error::ParseError;
/// start and end time of a Block
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct StartEnd(pub Time, pub Time);
impl Add for StartEnd {
type Output = StartEnd;
fn add(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl Add<Time> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Time) -> StartEnd {
self + rhs.0
}
}
impl Add<Duration> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 + rhs, self.1 + rhs)
}
}
impl Sub for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl Sub<Time> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Time) -> StartEnd {
self - rhs.0
}
}
impl Sub<Duration> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 - rhs, self.1 - rhs)
}
}
impl From<Duration> for StartEnd {
fn from(d: Duration) -> StartEnd {
StartEnd(Time::from(d), Time::from(d))
}
}
impl From<Time> for StartEnd {
fn from(t: Time) -> StartEnd {
StartEnd(t, t)
}
}
impl FromStr for StartEnd {
type Err = ParseError;
fn from_str(s: &str) -> Result<StartEnd, ParseError> {
let buf: Vec<_> = s.splitn(2, " --> ")
.filter_map(|s| Time::from_str(s).ok())
.collect();
if buf.len()!= 2 |
return Ok(StartEnd(buf[0], buf[1]));
}
}
impl Display for StartEnd {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{} --> {}", self.0, self.1)
}
}
/// used for the start or end time of a Block
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Time(pub Duration);
impl Default for Time {
fn default() -> Time {
Time(Duration::new(0, 0))
}
}
impl Add for Time {
type Output = Time;
fn add(self, rhs: Time) -> Time {
self + rhs.0
}
}
impl Add<Duration> for Time {
type Output = Time;
fn add(self, rhs: Duration) -> Time {
Time(self.0 + rhs)
}
}
impl Sub for Time {
type Output = Time;
fn sub(self, rhs: Time) -> Time {
self - rhs.0
}
}
impl Sub<Duration> for Time {
type Output = Time;
fn sub(self, rhs: Duration) -> Time {
if self.0.gt(&rhs) {
Time(self.0 - rhs)
} else {
Time::default()
}
}
}
impl From<Duration> for Time {
fn from(d: Duration) -> Time {
Time(d)
}
}
impl From<Time> for Duration {
fn from(t: Time) -> Duration {
t.0
}
}
impl From<(usize, usize, usize, usize)> for Time {
fn from(h_m_s_ms: (usize, usize, usize, usize)) -> Time {
let (h, m, s, ms) = h_m_s_ms;
Time(Duration::new(h as u64 * 60 * 60 + m as u64 * 60 + s as u64,
ms as u32 * 1_000_000))
}
}
/// parses a &str to a Time where &str is "HH:MM:SS,ms"
impl FromStr for Time {
type Err = ParseError;
fn from_str(s: &str) -> Result<Time, ParseError> {
let buf: Vec<usize> = s.splitn(2, ",")
.flat_map(|s| s.splitn(3, ":"))
.filter_map(|s| s.parse().ok())
.collect();
if buf.len()!= 4 {
return Err(ParseError::InvalidTimeString);
}
return Ok(Time::from((buf[0], buf[1], buf[2], buf[3])));
}
}
impl Display for Time {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let ms = self.0.subsec_nanos() / 1_000_000;
let mut t = self.0.as_secs();
let s = t % 60;
t /= 60;
let m = t % 60;
let h = t / 60;
write!(f, "{:0>2}:{:0>2}:{:0>2},{:0>3}", h, m, s, ms)
}
}
| {
return Err(ParseError::InvalidTimeLine);
} | conditional_block |
time.rs | // Copyright 2015 juggle-tux
//
// This file is part of srttool.
//
// srttool is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// srttool is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with srttool. If not, see <http://www.gnu.org/licenses/>.
//
use std::cmp::Eq;
use std::convert::From;
use std::error::Error;
use std::fmt::{self, Display};
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::time::Duration;
use error::ParseError;
/// start and end time of a Block
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct StartEnd(pub Time, pub Time);
impl Add for StartEnd {
type Output = StartEnd;
fn add(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl Add<Time> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Time) -> StartEnd {
self + rhs.0
}
}
impl Add<Duration> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 + rhs, self.1 + rhs)
}
}
impl Sub for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl Sub<Time> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Time) -> StartEnd {
self - rhs.0
}
}
impl Sub<Duration> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 - rhs, self.1 - rhs)
}
}
impl From<Duration> for StartEnd {
fn from(d: Duration) -> StartEnd {
StartEnd(Time::from(d), Time::from(d))
}
}
impl From<Time> for StartEnd {
fn from(t: Time) -> StartEnd {
StartEnd(t, t)
}
}
impl FromStr for StartEnd {
type Err = ParseError;
fn from_str(s: &str) -> Result<StartEnd, ParseError> {
let buf: Vec<_> = s.splitn(2, " --> ")
.filter_map(|s| Time::from_str(s).ok())
.collect();
if buf.len()!= 2 {
return Err(ParseError::InvalidTimeLine);
}
return Ok(StartEnd(buf[0], buf[1]));
}
}
impl Display for StartEnd {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{} --> {}", self.0, self.1)
}
}
/// used for the start or end time of a Block
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Time(pub Duration);
impl Default for Time {
fn default() -> Time {
Time(Duration::new(0, 0))
}
}
impl Add for Time {
type Output = Time;
fn add(self, rhs: Time) -> Time {
self + rhs.0
}
}
impl Add<Duration> for Time {
type Output = Time;
fn add(self, rhs: Duration) -> Time {
Time(self.0 + rhs)
}
}
impl Sub for Time {
type Output = Time;
fn sub(self, rhs: Time) -> Time {
self - rhs.0
}
}
impl Sub<Duration> for Time {
type Output = Time;
fn | (self, rhs: Duration) -> Time {
if self.0.gt(&rhs) {
Time(self.0 - rhs)
} else {
Time::default()
}
}
}
impl From<Duration> for Time {
fn from(d: Duration) -> Time {
Time(d)
}
}
impl From<Time> for Duration {
fn from(t: Time) -> Duration {
t.0
}
}
impl From<(usize, usize, usize, usize)> for Time {
fn from(h_m_s_ms: (usize, usize, usize, usize)) -> Time {
let (h, m, s, ms) = h_m_s_ms;
Time(Duration::new(h as u64 * 60 * 60 + m as u64 * 60 + s as u64,
ms as u32 * 1_000_000))
}
}
/// parses a &str to a Time where &str is "HH:MM:SS,ms"
impl FromStr for Time {
type Err = ParseError;
fn from_str(s: &str) -> Result<Time, ParseError> {
let buf: Vec<usize> = s.splitn(2, ",")
.flat_map(|s| s.splitn(3, ":"))
.filter_map(|s| s.parse().ok())
.collect();
if buf.len()!= 4 {
return Err(ParseError::InvalidTimeString);
}
return Ok(Time::from((buf[0], buf[1], buf[2], buf[3])));
}
}
impl Display for Time {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let ms = self.0.subsec_nanos() / 1_000_000;
let mut t = self.0.as_secs();
let s = t % 60;
t /= 60;
let m = t % 60;
let h = t / 60;
write!(f, "{:0>2}:{:0>2}:{:0>2},{:0>3}", h, m, s, ms)
}
}
| sub | identifier_name |
time.rs | // Copyright 2015 juggle-tux
//
// This file is part of srttool.
//
// srttool is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// srttool is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with srttool. If not, see <http://www.gnu.org/licenses/>.
//
use std::cmp::Eq;
use std::convert::From;
use std::error::Error;
use std::fmt::{self, Display};
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::time::Duration;
use error::ParseError;
| pub struct StartEnd(pub Time, pub Time);
impl Add for StartEnd {
type Output = StartEnd;
fn add(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 + rhs.0, self.1 + rhs.1)
}
}
impl Add<Time> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Time) -> StartEnd {
self + rhs.0
}
}
impl Add<Duration> for StartEnd {
type Output = StartEnd;
fn add(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 + rhs, self.1 + rhs)
}
}
impl Sub for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: StartEnd) -> StartEnd {
StartEnd(self.0 - rhs.0, self.1 - rhs.1)
}
}
impl Sub<Time> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Time) -> StartEnd {
self - rhs.0
}
}
impl Sub<Duration> for StartEnd {
type Output = StartEnd;
fn sub(self, rhs: Duration) -> StartEnd {
StartEnd(self.0 - rhs, self.1 - rhs)
}
}
impl From<Duration> for StartEnd {
fn from(d: Duration) -> StartEnd {
StartEnd(Time::from(d), Time::from(d))
}
}
impl From<Time> for StartEnd {
fn from(t: Time) -> StartEnd {
StartEnd(t, t)
}
}
impl FromStr for StartEnd {
type Err = ParseError;
fn from_str(s: &str) -> Result<StartEnd, ParseError> {
let buf: Vec<_> = s.splitn(2, " --> ")
.filter_map(|s| Time::from_str(s).ok())
.collect();
if buf.len()!= 2 {
return Err(ParseError::InvalidTimeLine);
}
return Ok(StartEnd(buf[0], buf[1]));
}
}
impl Display for StartEnd {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{} --> {}", self.0, self.1)
}
}
/// used for the start or end time of a Block
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Time(pub Duration);
impl Default for Time {
fn default() -> Time {
Time(Duration::new(0, 0))
}
}
impl Add for Time {
type Output = Time;
fn add(self, rhs: Time) -> Time {
self + rhs.0
}
}
impl Add<Duration> for Time {
type Output = Time;
fn add(self, rhs: Duration) -> Time {
Time(self.0 + rhs)
}
}
impl Sub for Time {
type Output = Time;
fn sub(self, rhs: Time) -> Time {
self - rhs.0
}
}
impl Sub<Duration> for Time {
type Output = Time;
fn sub(self, rhs: Duration) -> Time {
if self.0.gt(&rhs) {
Time(self.0 - rhs)
} else {
Time::default()
}
}
}
impl From<Duration> for Time {
fn from(d: Duration) -> Time {
Time(d)
}
}
impl From<Time> for Duration {
fn from(t: Time) -> Duration {
t.0
}
}
impl From<(usize, usize, usize, usize)> for Time {
fn from(h_m_s_ms: (usize, usize, usize, usize)) -> Time {
let (h, m, s, ms) = h_m_s_ms;
Time(Duration::new(h as u64 * 60 * 60 + m as u64 * 60 + s as u64,
ms as u32 * 1_000_000))
}
}
/// parses a &str to a Time where &str is "HH:MM:SS,ms"
impl FromStr for Time {
type Err = ParseError;
fn from_str(s: &str) -> Result<Time, ParseError> {
let buf: Vec<usize> = s.splitn(2, ",")
.flat_map(|s| s.splitn(3, ":"))
.filter_map(|s| s.parse().ok())
.collect();
if buf.len()!= 4 {
return Err(ParseError::InvalidTimeString);
}
return Ok(Time::from((buf[0], buf[1], buf[2], buf[3])));
}
}
impl Display for Time {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let ms = self.0.subsec_nanos() / 1_000_000;
let mut t = self.0.as_secs();
let s = t % 60;
t /= 60;
let m = t % 60;
let h = t / 60;
write!(f, "{:0>2}:{:0>2}:{:0>2},{:0>3}", h, m, s, ms)
}
} | /// start and end time of a Block
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)] | random_line_split |
unwind-box-res.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
use std::cast;
fn | () {
fail!();
}
struct r {
v: *int,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
let _v2: ~int = cast::transmute(self.v);
}
}
}
fn r(v: *int) -> r {
r {
v: v
}
}
fn main() {
unsafe {
let i1 = ~0;
let i1p = cast::transmute_copy(&i1);
cast::forget(i1);
let x = @r(i1p);
failfn();
error!(x);
}
}
| failfn | identifier_name |
unwind-box-res.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
use std::cast;
fn failfn() {
fail!();
}
struct r {
v: *int,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
let _v2: ~int = cast::transmute(self.v);
}
}
}
fn r(v: *int) -> r {
r {
v: v
}
}
fn main() | {
unsafe {
let i1 = ~0;
let i1p = cast::transmute_copy(&i1);
cast::forget(i1);
let x = @r(i1p);
failfn();
error!(x);
}
} | identifier_body |
|
unwind-box-res.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:fail
use std::cast;
fn failfn() {
fail!();
}
struct r {
v: *int,
}
impl Drop for r {
fn drop(&mut self) { |
fn r(v: *int) -> r {
r {
v: v
}
}
fn main() {
unsafe {
let i1 = ~0;
let i1p = cast::transmute_copy(&i1);
cast::forget(i1);
let x = @r(i1p);
failfn();
error!(x);
}
} | unsafe {
let _v2: ~int = cast::transmute(self.v);
}
}
} | random_line_split |
mutex.rs | use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut, Drop};
use core::sync::atomic::{AtomicBool, Ordering};
use common::debug::*;
use syscall::call::sys_yield;
/// A mutex, i.e. a form of safe shared memory between threads. See rust std's Mutex.
pub struct Mutex<T:?Sized> {
lock: AtomicBool,
value: UnsafeCell<T>,
}
impl<T> Mutex<T> {
/// Create a new mutex with value `value`.
pub fn new(value: T) -> Self {
Mutex {
lock: AtomicBool::new(false),
value: UnsafeCell::new(value),
}
}
}
impl<T:?Sized> Mutex<T> {
/// Lock the mutex
pub fn lock(&self) -> MutexGuard<T> {
while self.lock.compare_and_swap(false, true, Ordering::SeqCst) {
sys_yield();
}
MutexGuard::new(&self.lock, &self.value)
}
}
unsafe impl<T:?Sized + Send> Send for Mutex<T> { }
unsafe impl<T:?Sized + Send> Sync for Mutex<T> { }
/// A mutex guard (returned by.lock())
pub struct MutexGuard<'a, T:?Sized + 'a> {
lock: &'a AtomicBool,
data: &'a UnsafeCell<T>,
}
impl<'mutex, T:?Sized> MutexGuard<'mutex, T> {
fn new(lock: &'mutex AtomicBool, data: &'mutex UnsafeCell<T>) -> Self {
MutexGuard {
lock: lock,
data: data,
}
}
}
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.data.get() }
}
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.data.get() }
}
}
impl<'a, T:?Sized> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
if!self.lock.compare_and_swap(true, false, Ordering::SeqCst) |
}
}
| {
d("Mutex was already unlocked!\n");
} | conditional_block |
mutex.rs | use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut, Drop};
use core::sync::atomic::{AtomicBool, Ordering};
use common::debug::*;
use syscall::call::sys_yield;
/// A mutex, i.e. a form of safe shared memory between threads. See rust std's Mutex.
pub struct Mutex<T:?Sized> {
lock: AtomicBool,
value: UnsafeCell<T>,
}
impl<T> Mutex<T> {
/// Create a new mutex with value `value`.
pub fn new(value: T) -> Self {
Mutex {
lock: AtomicBool::new(false),
value: UnsafeCell::new(value),
}
}
}
impl<T:?Sized> Mutex<T> {
/// Lock the mutex
pub fn lock(&self) -> MutexGuard<T> {
while self.lock.compare_and_swap(false, true, Ordering::SeqCst) {
sys_yield();
}
MutexGuard::new(&self.lock, &self.value)
}
}
unsafe impl<T:?Sized + Send> Send for Mutex<T> { }
unsafe impl<T:?Sized + Send> Sync for Mutex<T> { }
/// A mutex guard (returned by.lock())
pub struct MutexGuard<'a, T:?Sized + 'a> {
lock: &'a AtomicBool,
data: &'a UnsafeCell<T>,
}
impl<'mutex, T:?Sized> MutexGuard<'mutex, T> {
fn new(lock: &'mutex AtomicBool, data: &'mutex UnsafeCell<T>) -> Self {
MutexGuard {
lock: lock,
data: data,
}
}
}
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.data.get() }
}
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.data.get() }
}
}
impl<'a, T:?Sized> Drop for MutexGuard<'a, T> {
fn | (&mut self) {
if!self.lock.compare_and_swap(true, false, Ordering::SeqCst) {
d("Mutex was already unlocked!\n");
}
}
}
| drop | identifier_name |
mutex.rs | use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut, Drop};
use core::sync::atomic::{AtomicBool, Ordering};
use common::debug::*;
use syscall::call::sys_yield;
/// A mutex, i.e. a form of safe shared memory between threads. See rust std's Mutex.
pub struct Mutex<T:?Sized> {
lock: AtomicBool,
value: UnsafeCell<T>,
}
impl<T> Mutex<T> {
/// Create a new mutex with value `value`.
pub fn new(value: T) -> Self {
Mutex {
lock: AtomicBool::new(false),
value: UnsafeCell::new(value),
}
}
}
impl<T:?Sized> Mutex<T> {
/// Lock the mutex
pub fn lock(&self) -> MutexGuard<T> {
while self.lock.compare_and_swap(false, true, Ordering::SeqCst) {
sys_yield();
}
MutexGuard::new(&self.lock, &self.value)
}
}
unsafe impl<T:?Sized + Send> Send for Mutex<T> { }
unsafe impl<T:?Sized + Send> Sync for Mutex<T> { }
/// A mutex guard (returned by.lock())
pub struct MutexGuard<'a, T:?Sized + 'a> {
lock: &'a AtomicBool,
data: &'a UnsafeCell<T>,
}
impl<'mutex, T:?Sized> MutexGuard<'mutex, T> {
fn new(lock: &'mutex AtomicBool, data: &'mutex UnsafeCell<T>) -> Self {
MutexGuard {
lock: lock,
data: data,
}
}
}
| impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.data.get() }
}
}
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.data.get() }
}
}
impl<'a, T:?Sized> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
if!self.lock.compare_and_swap(true, false, Ordering::SeqCst) {
d("Mutex was already unlocked!\n");
}
}
} | random_line_split |
|
xorshift.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Xorshift family of random number generators.
use std::num::Wrapping as w;
use {Rng, SeedableRng, Rand, w32, w64};
/// A Xorshift128+[1] random number generator.
///
/// The Xorshift128+ algorithm is not suitable for cryptographic purposes
/// but is very fast. If you do not know for sure that it fits your
/// requirements, use a more secure one such as `IsaacRng` or `OsRng`.
/// This variant uses 64bit arithmetic and is appropriated for 64bit architectures.
/// Compared to Xorshift128 this variant also produces a higher quality distribution.
///
/// [1]: Vigna, S. (2014). [Further scramblings of
/// Marsaglia's xorshift generators](http://arxiv.org/pdf/1404.0390.pdf).
/// arXiv preprint arXiv:1404.0390.
#[derive(Copy, Clone)]
pub struct XorShiftPlusRng {
s: [w64; 2]
}
impl XorShiftPlusRng {
/// Creates a new XorShiftPlusRng instance which is not seeded.
///
/// The initial values of this RNG are constants, so all generators created
/// by this function will yield the same stream of random numbers. It is
/// highly recommended that this is created through `SeedableRng` instead of
/// this function
pub fn new_unseeded() -> XorShiftPlusRng {
XorShiftPlusRng { | impl Rng for XorShiftPlusRng {
#[inline]
fn next_u32(&mut self) -> u32 {
self.next_u64() as u32
}
#[inline]
fn next_u64(&mut self) -> u64 {
let mut s1 = self.s[0];
let s0 = self.s[1];
self.s[0] = s0;
s1 = s1 ^ (s1 << 23); // a
self.s[1] = s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26); // b, c
(self.s[1] + s0).0
}
}
impl SeedableRng<[u64; 2]> for XorShiftPlusRng {
/// Reseed an XorShiftPlusRng. This will panic if `seed` is entirely 0.
fn reseed(&mut self, seed: [u64; 2]) {
assert!(seed!= [0, 0],
"XorShiftPlusRng.reseed called with an all zero seed.");
self.s = [w(seed[0]), w(seed[1])];
}
/// Create a new XorShiftPlusRng. This will panic if `seed` is entirely 0.
fn from_seed(seed: [u64; 2]) -> XorShiftPlusRng {
assert!(seed!= [0, 0],
"XorShiftPlusRng::from_seed called with an all zero seed.");
XorShiftPlusRng {
s: [w(seed[0]), w(seed[1])]
}
}
}
impl Rand for XorShiftPlusRng {
fn rand<R: Rng>(rng: &mut R) -> XorShiftPlusRng {
let mut seed: (u64, u64) = rng.gen();
while seed == (0, 0) {
seed = rng.gen();
}
XorShiftPlusRng { s: [w(seed.0), w(seed.1)] }
}
}
/// An Xorshift128[1] random number generator.
///
/// The Xorshift128 algorithm is not suitable for cryptographic purposes
/// but is very fast. If you do not know for sure that it fits your
/// requirements, use a more secure one such as `IsaacRng` or `OsRng`.
/// This variant uses 32bit arithmetic and is appropriated for 32bit architectures.
///
/// [1]: Marsaglia, George (July 2003). ["Xorshift
/// RNGs"](http://www.jstatsoft.org/v08/i14/paper). *Journal of
/// Statistical Software*. Vol. 8 (Issue 14).
#[derive(Copy, Clone)]
pub struct XorShiftRng {
x: w32,
y: w32,
z: w32,
w: w32,
}
impl XorShiftRng {
/// Creates a new XorShiftRng instance which is not seeded.
///
/// The initial values of this RNG are constants, so all generators created
/// by this function will yield the same stream of random numbers. It is
/// highly recommended that this is created through `SeedableRng` instead of
/// this function
pub fn new_unseeded() -> XorShiftRng {
XorShiftRng {
x: w(0x193a6754),
y: w(0xa8a7d469),
z: w(0x97830e05),
w: w(0x113ba7bb),
}
}
}
impl Rng for XorShiftRng {
#[inline]
fn next_u32(&mut self) -> u32 {
let x = self.x;
let t = x ^ (x << 11);
self.x = self.y;
self.y = self.z;
self.z = self.w;
let w_ = self.w;
self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
self.w.0
}
}
impl SeedableRng<[u32; 4]> for XorShiftRng {
/// Reseed an XorShiftRng. This will panic if `seed` is entirely 0.
fn reseed(&mut self, seed: [u32; 4]) {
assert!(seed!= [0, 0, 0, 0],
"XorShiftRng.reseed called with an all zero seed.");
self.x = w(seed[0]);
self.y = w(seed[1]);
self.z = w(seed[2]);
self.w = w(seed[3]);
}
/// Create a new XorShiftRng. This will panic if `seed` is entirely 0.
fn from_seed(seed: [u32; 4]) -> XorShiftRng {
assert!(seed!= [0, 0, 0, 0],
"XorShiftRng::from_seed called with an all zero seed.");
XorShiftRng {
x: w(seed[0]),
y: w(seed[1]),
z: w(seed[2]),
w: w(seed[3]),
}
}
}
impl Rand for XorShiftRng {
fn rand<R: Rng>(rng: &mut R) -> XorShiftRng {
let mut tuple: (u32, u32, u32, u32) = rng.gen();
while tuple == (0, 0, 0, 0) {
tuple = rng.gen();
}
let (x, y, z, w_) = tuple;
XorShiftRng { x: w(x), y: w(y), z: w(z), w: w(w_) }
}
}
#[cfg(test)]
mod tests {
use SeedableRng;
use super::{XorShiftRng, XorShiftPlusRng};
#[test]
#[should_panic]
fn test_xorshift64_zero_seed() {
let _: XorShiftRng = SeedableRng::from_seed([0, 0, 0, 0]);
}
#[test]
#[should_panic]
fn test_xorshift128p_zero_seed() {
let _: XorShiftPlusRng = SeedableRng::from_seed([0, 0]);
}
#[test]
fn test_xorshift64_non_zero_seed() {
let _: XorShiftRng = SeedableRng::from_seed([1, 1, 0, 0]);
}
#[test]
fn test_xorshift128p_non_zero_seed() {
let _: XorShiftPlusRng = SeedableRng::from_seed([1, 0]);
}
} | s: [w(0x193a6754a8a7d469), w(0x97830e05113ba7bb)]
}
}
}
| random_line_split |
xorshift.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Xorshift family of random number generators.
use std::num::Wrapping as w;
use {Rng, SeedableRng, Rand, w32, w64};
/// A Xorshift128+[1] random number generator.
///
/// The Xorshift128+ algorithm is not suitable for cryptographic purposes
/// but is very fast. If you do not know for sure that it fits your
/// requirements, use a more secure one such as `IsaacRng` or `OsRng`.
/// This variant uses 64bit arithmetic and is appropriated for 64bit architectures.
/// Compared to Xorshift128 this variant also produces a higher quality distribution.
///
/// [1]: Vigna, S. (2014). [Further scramblings of
/// Marsaglia's xorshift generators](http://arxiv.org/pdf/1404.0390.pdf).
/// arXiv preprint arXiv:1404.0390.
#[derive(Copy, Clone)]
pub struct XorShiftPlusRng {
s: [w64; 2]
}
impl XorShiftPlusRng {
/// Creates a new XorShiftPlusRng instance which is not seeded.
///
/// The initial values of this RNG are constants, so all generators created
/// by this function will yield the same stream of random numbers. It is
/// highly recommended that this is created through `SeedableRng` instead of
/// this function
pub fn new_unseeded() -> XorShiftPlusRng {
XorShiftPlusRng {
s: [w(0x193a6754a8a7d469), w(0x97830e05113ba7bb)]
}
}
}
impl Rng for XorShiftPlusRng {
#[inline]
fn | (&mut self) -> u32 {
self.next_u64() as u32
}
#[inline]
fn next_u64(&mut self) -> u64 {
let mut s1 = self.s[0];
let s0 = self.s[1];
self.s[0] = s0;
s1 = s1 ^ (s1 << 23); // a
self.s[1] = s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26); // b, c
(self.s[1] + s0).0
}
}
impl SeedableRng<[u64; 2]> for XorShiftPlusRng {
/// Reseed an XorShiftPlusRng. This will panic if `seed` is entirely 0.
fn reseed(&mut self, seed: [u64; 2]) {
assert!(seed!= [0, 0],
"XorShiftPlusRng.reseed called with an all zero seed.");
self.s = [w(seed[0]), w(seed[1])];
}
/// Create a new XorShiftPlusRng. This will panic if `seed` is entirely 0.
fn from_seed(seed: [u64; 2]) -> XorShiftPlusRng {
assert!(seed!= [0, 0],
"XorShiftPlusRng::from_seed called with an all zero seed.");
XorShiftPlusRng {
s: [w(seed[0]), w(seed[1])]
}
}
}
impl Rand for XorShiftPlusRng {
fn rand<R: Rng>(rng: &mut R) -> XorShiftPlusRng {
let mut seed: (u64, u64) = rng.gen();
while seed == (0, 0) {
seed = rng.gen();
}
XorShiftPlusRng { s: [w(seed.0), w(seed.1)] }
}
}
/// An Xorshift128[1] random number generator.
///
/// The Xorshift128 algorithm is not suitable for cryptographic purposes
/// but is very fast. If you do not know for sure that it fits your
/// requirements, use a more secure one such as `IsaacRng` or `OsRng`.
/// This variant uses 32bit arithmetic and is appropriated for 32bit architectures.
///
/// [1]: Marsaglia, George (July 2003). ["Xorshift
/// RNGs"](http://www.jstatsoft.org/v08/i14/paper). *Journal of
/// Statistical Software*. Vol. 8 (Issue 14).
#[derive(Copy, Clone)]
pub struct XorShiftRng {
x: w32,
y: w32,
z: w32,
w: w32,
}
impl XorShiftRng {
/// Creates a new XorShiftRng instance which is not seeded.
///
/// The initial values of this RNG are constants, so all generators created
/// by this function will yield the same stream of random numbers. It is
/// highly recommended that this is created through `SeedableRng` instead of
/// this function
pub fn new_unseeded() -> XorShiftRng {
XorShiftRng {
x: w(0x193a6754),
y: w(0xa8a7d469),
z: w(0x97830e05),
w: w(0x113ba7bb),
}
}
}
impl Rng for XorShiftRng {
#[inline]
fn next_u32(&mut self) -> u32 {
let x = self.x;
let t = x ^ (x << 11);
self.x = self.y;
self.y = self.z;
self.z = self.w;
let w_ = self.w;
self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
self.w.0
}
}
impl SeedableRng<[u32; 4]> for XorShiftRng {
/// Reseed an XorShiftRng. This will panic if `seed` is entirely 0.
fn reseed(&mut self, seed: [u32; 4]) {
assert!(seed!= [0, 0, 0, 0],
"XorShiftRng.reseed called with an all zero seed.");
self.x = w(seed[0]);
self.y = w(seed[1]);
self.z = w(seed[2]);
self.w = w(seed[3]);
}
/// Create a new XorShiftRng. This will panic if `seed` is entirely 0.
fn from_seed(seed: [u32; 4]) -> XorShiftRng {
assert!(seed!= [0, 0, 0, 0],
"XorShiftRng::from_seed called with an all zero seed.");
XorShiftRng {
x: w(seed[0]),
y: w(seed[1]),
z: w(seed[2]),
w: w(seed[3]),
}
}
}
impl Rand for XorShiftRng {
fn rand<R: Rng>(rng: &mut R) -> XorShiftRng {
let mut tuple: (u32, u32, u32, u32) = rng.gen();
while tuple == (0, 0, 0, 0) {
tuple = rng.gen();
}
let (x, y, z, w_) = tuple;
XorShiftRng { x: w(x), y: w(y), z: w(z), w: w(w_) }
}
}
#[cfg(test)]
mod tests {
use SeedableRng;
use super::{XorShiftRng, XorShiftPlusRng};
#[test]
#[should_panic]
fn test_xorshift64_zero_seed() {
let _: XorShiftRng = SeedableRng::from_seed([0, 0, 0, 0]);
}
#[test]
#[should_panic]
fn test_xorshift128p_zero_seed() {
let _: XorShiftPlusRng = SeedableRng::from_seed([0, 0]);
}
#[test]
fn test_xorshift64_non_zero_seed() {
let _: XorShiftRng = SeedableRng::from_seed([1, 1, 0, 0]);
}
#[test]
fn test_xorshift128p_non_zero_seed() {
let _: XorShiftPlusRng = SeedableRng::from_seed([1, 0]);
}
} | next_u32 | identifier_name |
asm.rs | use super::BackendTypes;
use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::def_id::DefId;
use rustc_hir::LlvmInlineAsmInner;
use rustc_middle::ty::Instance;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
#[derive(Debug)]
pub enum InlineAsmOperandRef<'tcx, B: BackendTypes +?Sized> {
In {
reg: InlineAsmRegOrRegClass,
value: OperandRef<'tcx, B::Value>,
},
Out {
reg: InlineAsmRegOrRegClass,
late: bool,
place: Option<PlaceRef<'tcx, B::Value>>,
},
InOut {
reg: InlineAsmRegOrRegClass,
late: bool,
in_value: OperandRef<'tcx, B::Value>, | Const {
string: String,
},
SymFn {
instance: Instance<'tcx>,
},
SymStatic {
def_id: DefId,
},
}
#[derive(Debug)]
pub enum GlobalAsmOperandRef {
Const { string: String },
}
pub trait AsmBuilderMethods<'tcx>: BackendTypes {
/// Take an inline assembly expression and splat it out via LLVM
fn codegen_llvm_inline_asm(
&mut self,
ia: &LlvmInlineAsmInner,
outputs: Vec<PlaceRef<'tcx, Self::Value>>,
inputs: Vec<Self::Value>,
span: Span,
) -> bool;
/// Take an inline assembly expression and splat it out via LLVM
fn codegen_inline_asm(
&mut self,
template: &[InlineAsmTemplatePiece],
operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
line_spans: &[Span],
);
}
pub trait AsmMethods {
fn codegen_global_asm(
&self,
template: &[InlineAsmTemplatePiece],
operands: &[GlobalAsmOperandRef],
options: InlineAsmOptions,
line_spans: &[Span],
);
} | out_place: Option<PlaceRef<'tcx, B::Value>>,
}, | random_line_split |
asm.rs | use super::BackendTypes;
use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::def_id::DefId;
use rustc_hir::LlvmInlineAsmInner;
use rustc_middle::ty::Instance;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
#[derive(Debug)]
pub enum | <'tcx, B: BackendTypes +?Sized> {
In {
reg: InlineAsmRegOrRegClass,
value: OperandRef<'tcx, B::Value>,
},
Out {
reg: InlineAsmRegOrRegClass,
late: bool,
place: Option<PlaceRef<'tcx, B::Value>>,
},
InOut {
reg: InlineAsmRegOrRegClass,
late: bool,
in_value: OperandRef<'tcx, B::Value>,
out_place: Option<PlaceRef<'tcx, B::Value>>,
},
Const {
string: String,
},
SymFn {
instance: Instance<'tcx>,
},
SymStatic {
def_id: DefId,
},
}
#[derive(Debug)]
pub enum GlobalAsmOperandRef {
Const { string: String },
}
pub trait AsmBuilderMethods<'tcx>: BackendTypes {
/// Take an inline assembly expression and splat it out via LLVM
fn codegen_llvm_inline_asm(
&mut self,
ia: &LlvmInlineAsmInner,
outputs: Vec<PlaceRef<'tcx, Self::Value>>,
inputs: Vec<Self::Value>,
span: Span,
) -> bool;
/// Take an inline assembly expression and splat it out via LLVM
fn codegen_inline_asm(
&mut self,
template: &[InlineAsmTemplatePiece],
operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
line_spans: &[Span],
);
}
pub trait AsmMethods {
fn codegen_global_asm(
&self,
template: &[InlineAsmTemplatePiece],
operands: &[GlobalAsmOperandRef],
options: InlineAsmOptions,
line_spans: &[Span],
);
}
| InlineAsmOperandRef | identifier_name |
const-fields-and-indexing.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static x : [int,..4] = [1,2,3,4];
static p : int = x[2];
static y : &'static [int] = &[1,2,3,4];
static q : int = y[2];
struct S {a: int, b: int}
static s : S = S {a: 10, b: 20};
static t : int = s.b;
struct K {a: int, b: int, c: D}
struct D { d: int, e: int }
static k : K = K {a: 10, b: 20, c: D {d: 30, e: 40}};
static m : int = k.c.e;
| println!("{:?}", q);
println!("{:?}", t);
assert_eq!(p, 3);
assert_eq!(q, 3);
assert_eq!(t, 20);
} | pub fn main() {
println!("{:?}", p); | random_line_split |
const-fields-and-indexing.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static x : [int,..4] = [1,2,3,4];
static p : int = x[2];
static y : &'static [int] = &[1,2,3,4];
static q : int = y[2];
struct S {a: int, b: int}
static s : S = S {a: 10, b: 20};
static t : int = s.b;
struct K {a: int, b: int, c: D}
struct | { d: int, e: int }
static k : K = K {a: 10, b: 20, c: D {d: 30, e: 40}};
static m : int = k.c.e;
pub fn main() {
println!("{:?}", p);
println!("{:?}", q);
println!("{:?}", t);
assert_eq!(p, 3);
assert_eq!(q, 3);
assert_eq!(t, 20);
}
| D | identifier_name |
iter.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! External iterators for generic mathematics
use {Integer, Zero, One, CheckedAdd, ToPrimitive};
use redox::ops::{Add, Sub};
/// An iterator over the range [start, stop)
#[derive(Clone)]
pub struct Range<A> {
state: A,
stop: A,
one: A
}
/// Returns an iterator over the given range [start, stop) (that is, starting
/// at start (inclusive), and ending at stop (exclusive)).
///
/// # Example
///
/// ```rust
/// use num::iter;
///
/// let array = [0, 1, 2, 3, 4];
///
/// for i in iter::range(0, 5) {
/// println!("{}", i);
/// assert_eq!(i, array[i]);
/// }
/// ```
#[inline]
pub fn range<A>(start: A, stop: A) -> Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
Range{state: start, stop: stop, one: One::one()}
}
// FIXME: rust-lang/rust#10414: Unfortunate type bound
impl<A> Iterator for Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if self.state < self.stop {
let result = self.state.clone();
self.state = self.state.clone() + self.one.clone();
Some(result)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// This first checks if the elements are representable as i64. If they aren't, try u64 (to
// handle cases like range(huge, huger)). We don't use usize/int because the difference of
// the i64/u64 might lie within their range.
let bound = match self.state.to_i64() {
Some(a) => {
let sz = self.stop.to_i64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None,
}
},
None => match self.state.to_u64() {
Some(a) => {
let sz = self.stop.to_u64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None
}
},
None => None
}
};
match bound {
Some(b) => (b, Some(b)),
// Standard fallback for unbounded/unrepresentable bounds
None => (0, None)
}
}
}
/// `Integer` is required to ensure the range will be the same regardless of
/// the direction it is consumed.
impl<A> DoubleEndedIterator for Range<A>
where A: Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.stop > self.state {
self.stop = self.stop.clone() - self.one.clone();
Some(self.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop]
#[derive(Clone)]
pub struct | <A> {
range: Range<A>,
done: bool,
}
/// Return an iterator over the range [start, stop]
#[inline]
pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
RangeInclusive{range: range(start, stop), done: false}
}
impl<A> Iterator for RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
match self.range.next() {
Some(x) => Some(x),
None => {
if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lo, hi) = self.range.size_hint();
if self.done {
(lo, hi)
} else {
let lo = lo.saturating_add(1);
let hi = match hi {
Some(x) => x.checked_add(1),
None => None
};
(lo, hi)
}
}
}
impl<A> DoubleEndedIterator for RangeInclusive<A>
where A: Sub<A, Output = A> + Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.range.stop > self.range.state {
let result = self.range.stop.clone();
self.range.stop = self.range.stop.clone() - self.range.one.clone();
Some(result)
} else if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStep<A> {
state: A,
stop: A,
step: A,
rev: bool,
}
/// Return an iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step<A>(start: A, stop: A, step: A) -> RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStep{state: start, stop: stop, step: step, rev: rev}
}
impl<A> Iterator for RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if (self.rev && self.state > self.stop) || (!self.rev && self.state < self.stop) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.state = self.stop.clone()
}
Some(result)
} else {
None
}
}
}
/// An iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStepInclusive<A> {
state: A,
stop: A,
step: A,
rev: bool,
done: bool,
}
/// Return an iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step_inclusive<A>(start: A, stop: A, step: A) -> RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStepInclusive{state: start, stop: stop, step: step, rev: rev, done: false}
}
impl<A> Iterator for RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + PartialEq
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if!self.done && ((self.rev && self.state >= self.stop) ||
(!self.rev && self.state <= self.stop)) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.done = true
}
Some(result)
} else {
None
}
}
}
| RangeInclusive | identifier_name |
iter.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! External iterators for generic mathematics
use {Integer, Zero, One, CheckedAdd, ToPrimitive};
use redox::ops::{Add, Sub};
/// An iterator over the range [start, stop)
#[derive(Clone)]
pub struct Range<A> {
state: A,
stop: A,
one: A
}
/// Returns an iterator over the given range [start, stop) (that is, starting
/// at start (inclusive), and ending at stop (exclusive)).
///
/// # Example
///
/// ```rust
/// use num::iter;
///
/// let array = [0, 1, 2, 3, 4];
///
/// for i in iter::range(0, 5) {
/// println!("{}", i);
/// assert_eq!(i, array[i]);
/// }
/// ```
#[inline]
pub fn range<A>(start: A, stop: A) -> Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
Range{state: start, stop: stop, one: One::one()}
}
// FIXME: rust-lang/rust#10414: Unfortunate type bound
impl<A> Iterator for Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if self.state < self.stop {
let result = self.state.clone();
self.state = self.state.clone() + self.one.clone();
Some(result)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// This first checks if the elements are representable as i64. If they aren't, try u64 (to
// handle cases like range(huge, huger)). We don't use usize/int because the difference of
// the i64/u64 might lie within their range.
let bound = match self.state.to_i64() {
Some(a) => {
let sz = self.stop.to_i64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None,
}
},
None => match self.state.to_u64() {
Some(a) => {
let sz = self.stop.to_u64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None
}
},
None => None
}
};
match bound {
Some(b) => (b, Some(b)),
// Standard fallback for unbounded/unrepresentable bounds
None => (0, None)
}
}
}
/// `Integer` is required to ensure the range will be the same regardless of
/// the direction it is consumed.
impl<A> DoubleEndedIterator for Range<A>
where A: Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.stop > self.state {
self.stop = self.stop.clone() - self.one.clone();
Some(self.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop]
#[derive(Clone)]
pub struct RangeInclusive<A> {
range: Range<A>,
done: bool,
}
/// Return an iterator over the range [start, stop]
#[inline]
pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
RangeInclusive{range: range(start, stop), done: false}
}
impl<A> Iterator for RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
match self.range.next() {
Some(x) => Some(x),
None => {
if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lo, hi) = self.range.size_hint();
if self.done {
(lo, hi)
} else {
let lo = lo.saturating_add(1);
let hi = match hi {
Some(x) => x.checked_add(1),
None => None
};
(lo, hi)
}
}
}
impl<A> DoubleEndedIterator for RangeInclusive<A>
where A: Sub<A, Output = A> + Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.range.stop > self.range.state {
let result = self.range.stop.clone();
self.range.stop = self.range.stop.clone() - self.range.one.clone();
Some(result)
} else if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStep<A> {
state: A,
stop: A,
step: A,
rev: bool,
}
/// Return an iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step<A>(start: A, stop: A, step: A) -> RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStep{state: start, stop: stop, step: step, rev: rev}
}
impl<A> Iterator for RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if (self.rev && self.state > self.stop) || (!self.rev && self.state < self.stop) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.state = self.stop.clone()
}
Some(result)
} else {
None
}
}
}
/// An iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStepInclusive<A> {
state: A,
stop: A,
step: A,
rev: bool,
done: bool,
}
/// Return an iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step_inclusive<A>(start: A, stop: A, step: A) -> RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStepInclusive{state: start, stop: stop, step: step, rev: rev, done: false}
}
impl<A> Iterator for RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + PartialEq
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> |
}
| {
if !self.done && ((self.rev && self.state >= self.stop) ||
(!self.rev && self.state <= self.stop)) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.done = true
}
Some(result)
} else {
None
}
} | identifier_body |
iter.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! External iterators for generic mathematics
use {Integer, Zero, One, CheckedAdd, ToPrimitive};
use redox::ops::{Add, Sub};
/// An iterator over the range [start, stop)
#[derive(Clone)]
pub struct Range<A> {
state: A,
stop: A,
one: A
}
/// Returns an iterator over the given range [start, stop) (that is, starting
/// at start (inclusive), and ending at stop (exclusive)).
/// | ///
/// let array = [0, 1, 2, 3, 4];
///
/// for i in iter::range(0, 5) {
/// println!("{}", i);
/// assert_eq!(i, array[i]);
/// }
/// ```
#[inline]
pub fn range<A>(start: A, stop: A) -> Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
Range{state: start, stop: stop, one: One::one()}
}
// FIXME: rust-lang/rust#10414: Unfortunate type bound
impl<A> Iterator for Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if self.state < self.stop {
let result = self.state.clone();
self.state = self.state.clone() + self.one.clone();
Some(result)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// This first checks if the elements are representable as i64. If they aren't, try u64 (to
// handle cases like range(huge, huger)). We don't use usize/int because the difference of
// the i64/u64 might lie within their range.
let bound = match self.state.to_i64() {
Some(a) => {
let sz = self.stop.to_i64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None,
}
},
None => match self.state.to_u64() {
Some(a) => {
let sz = self.stop.to_u64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None
}
},
None => None
}
};
match bound {
Some(b) => (b, Some(b)),
// Standard fallback for unbounded/unrepresentable bounds
None => (0, None)
}
}
}
/// `Integer` is required to ensure the range will be the same regardless of
/// the direction it is consumed.
impl<A> DoubleEndedIterator for Range<A>
where A: Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.stop > self.state {
self.stop = self.stop.clone() - self.one.clone();
Some(self.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop]
#[derive(Clone)]
pub struct RangeInclusive<A> {
range: Range<A>,
done: bool,
}
/// Return an iterator over the range [start, stop]
#[inline]
pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
RangeInclusive{range: range(start, stop), done: false}
}
impl<A> Iterator for RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
match self.range.next() {
Some(x) => Some(x),
None => {
if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lo, hi) = self.range.size_hint();
if self.done {
(lo, hi)
} else {
let lo = lo.saturating_add(1);
let hi = match hi {
Some(x) => x.checked_add(1),
None => None
};
(lo, hi)
}
}
}
impl<A> DoubleEndedIterator for RangeInclusive<A>
where A: Sub<A, Output = A> + Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.range.stop > self.range.state {
let result = self.range.stop.clone();
self.range.stop = self.range.stop.clone() - self.range.one.clone();
Some(result)
} else if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStep<A> {
state: A,
stop: A,
step: A,
rev: bool,
}
/// Return an iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step<A>(start: A, stop: A, step: A) -> RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStep{state: start, stop: stop, step: step, rev: rev}
}
impl<A> Iterator for RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if (self.rev && self.state > self.stop) || (!self.rev && self.state < self.stop) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.state = self.stop.clone()
}
Some(result)
} else {
None
}
}
}
/// An iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStepInclusive<A> {
state: A,
stop: A,
step: A,
rev: bool,
done: bool,
}
/// Return an iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step_inclusive<A>(start: A, stop: A, step: A) -> RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStepInclusive{state: start, stop: stop, step: step, rev: rev, done: false}
}
impl<A> Iterator for RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + PartialEq
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if!self.done && ((self.rev && self.state >= self.stop) ||
(!self.rev && self.state <= self.stop)) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.done = true
}
Some(result)
} else {
None
}
}
} | /// # Example
///
/// ```rust
/// use num::iter; | random_line_split |
iter.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! External iterators for generic mathematics
use {Integer, Zero, One, CheckedAdd, ToPrimitive};
use redox::ops::{Add, Sub};
/// An iterator over the range [start, stop)
#[derive(Clone)]
pub struct Range<A> {
state: A,
stop: A,
one: A
}
/// Returns an iterator over the given range [start, stop) (that is, starting
/// at start (inclusive), and ending at stop (exclusive)).
///
/// # Example
///
/// ```rust
/// use num::iter;
///
/// let array = [0, 1, 2, 3, 4];
///
/// for i in iter::range(0, 5) {
/// println!("{}", i);
/// assert_eq!(i, array[i]);
/// }
/// ```
#[inline]
pub fn range<A>(start: A, stop: A) -> Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
Range{state: start, stop: stop, one: One::one()}
}
// FIXME: rust-lang/rust#10414: Unfortunate type bound
impl<A> Iterator for Range<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if self.state < self.stop {
let result = self.state.clone();
self.state = self.state.clone() + self.one.clone();
Some(result)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// This first checks if the elements are representable as i64. If they aren't, try u64 (to
// handle cases like range(huge, huger)). We don't use usize/int because the difference of
// the i64/u64 might lie within their range.
let bound = match self.state.to_i64() {
Some(a) => {
let sz = self.stop.to_i64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None,
}
},
None => match self.state.to_u64() {
Some(a) => {
let sz = self.stop.to_u64().map(|b| b.checked_sub(a));
match sz {
Some(Some(bound)) => bound.to_usize(),
_ => None
}
},
None => None
}
};
match bound {
Some(b) => (b, Some(b)),
// Standard fallback for unbounded/unrepresentable bounds
None => (0, None)
}
}
}
/// `Integer` is required to ensure the range will be the same regardless of
/// the direction it is consumed.
impl<A> DoubleEndedIterator for Range<A>
where A: Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.stop > self.state {
self.stop = self.stop.clone() - self.one.clone();
Some(self.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop]
#[derive(Clone)]
pub struct RangeInclusive<A> {
range: Range<A>,
done: bool,
}
/// Return an iterator over the range [start, stop]
#[inline]
pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + One
{
RangeInclusive{range: range(start, stop), done: false}
}
impl<A> Iterator for RangeInclusive<A>
where A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
match self.range.next() {
Some(x) => Some(x),
None => {
if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else |
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lo, hi) = self.range.size_hint();
if self.done {
(lo, hi)
} else {
let lo = lo.saturating_add(1);
let hi = match hi {
Some(x) => x.checked_add(1),
None => None
};
(lo, hi)
}
}
}
impl<A> DoubleEndedIterator for RangeInclusive<A>
where A: Sub<A, Output = A> + Integer + PartialOrd + Clone + ToPrimitive
{
#[inline]
fn next_back(&mut self) -> Option<A> {
if self.range.stop > self.range.state {
let result = self.range.stop.clone();
self.range.stop = self.range.stop.clone() - self.range.one.clone();
Some(result)
} else if!self.done && self.range.state == self.range.stop {
self.done = true;
Some(self.range.stop.clone())
} else {
None
}
}
}
/// An iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStep<A> {
state: A,
stop: A,
step: A,
rev: bool,
}
/// Return an iterator over the range [start, stop) by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step<A>(start: A, stop: A, step: A) -> RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStep{state: start, stop: stop, step: step, rev: rev}
}
impl<A> Iterator for RangeStep<A>
where A: CheckedAdd + PartialOrd + Clone
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if (self.rev && self.state > self.stop) || (!self.rev && self.state < self.stop) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.state = self.stop.clone()
}
Some(result)
} else {
None
}
}
}
/// An iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[derive(Clone)]
pub struct RangeStepInclusive<A> {
state: A,
stop: A,
step: A,
rev: bool,
done: bool,
}
/// Return an iterator over the range [start, stop] by `step`. It handles overflow by stopping.
#[inline]
pub fn range_step_inclusive<A>(start: A, stop: A, step: A) -> RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + Zero
{
let rev = step < Zero::zero();
RangeStepInclusive{state: start, stop: stop, step: step, rev: rev, done: false}
}
impl<A> Iterator for RangeStepInclusive<A>
where A: CheckedAdd + PartialOrd + Clone + PartialEq
{
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
if!self.done && ((self.rev && self.state >= self.stop) ||
(!self.rev && self.state <= self.stop)) {
let result = self.state.clone();
match self.state.checked_add(&self.step) {
Some(x) => self.state = x,
None => self.done = true
}
Some(result)
} else {
None
}
}
}
| {
None
} | conditional_block |
test_terminal.rs | use crate::input::Key;
use crate::models::application::Event;
use scribe::buffer::Position;
use std::sync::Mutex;
use super::Terminal;
use crate::view::{Colors, Style};
const WIDTH: usize = 10;
const HEIGHT: usize = 10;
// A headless terminal that tracks printed data, which can be
// returned as a String to test display logic of other types.
pub struct TestTerminal {
data: Mutex<[[Option<(char, Colors)>; WIDTH]; HEIGHT]>, // 2D array of chars to represent screen
cursor: Mutex<Option<Position>>,
key_sent: Mutex<bool>
}
impl TestTerminal {
pub fn new() -> TestTerminal {
TestTerminal {
data: Mutex::new([[None; WIDTH]; HEIGHT]),
cursor: Mutex::new(None),
key_sent: Mutex::new(false)
}
}
// Returns a String representation of the printed data.
pub fn content(&self) -> String {
let mut data = String::new();
let mut last_row_with_data = 0;
let mut last_column_with_data = 0;
for (y, row) in self.data.lock().unwrap().iter().enumerate() {
for (x, cell) in row.iter().enumerate() {
if let Some((c, _)) = *cell |
}
}
data
}
}
impl Terminal for TestTerminal {
fn listen(&self) -> Option<Event> {
// This implementation will return a key once, followed by nothing.
// This allows us to test both scenarios, the latter being crucial
// to stopping the application in test mode; the input listener only
// checks for kill signals when the terminal returns no input.
let mut key_sent = self.key_sent.lock().unwrap();
if *key_sent {
None
} else {
*key_sent = true;
Some(Event::Key(Key::Char('A')))
}
}
fn clear(&self) {
for row in self.data.lock().unwrap().iter_mut() {
*row = [None; WIDTH];
}
}
fn present(&self) { }
fn width(&self) -> usize { WIDTH }
fn height(&self) -> usize { HEIGHT }
fn set_cursor(&self, position: Option<Position>) {
let mut cursor = self.cursor.lock().unwrap();
*cursor = position;
}
fn suspend(&self) { }
fn print(&self, position: &Position, _: Style, colors: Colors, content: &str) {
// Ignore lines beyond visible height.
if position.line >= self.height() { return; }
let mut data = self.data.lock().unwrap();
let string_content = format!("{}", content);
for (i, c) in string_content.chars().enumerate() {
// Ignore characters beyond visible width.
if i+position.offset >= WIDTH { break; }
data[position.line][i+position.offset] = Some((c, colors));
}
}
}
#[cfg(test)]
mod tests {
use crate::view::terminal::Terminal;
use super::TestTerminal;
use crate::view::{Colors, Style};
use scribe::buffer::Position;
#[test]
fn print_sets_terminal_data_correctly() {
let terminal = Box::new(TestTerminal::new());
terminal.print(&Position{ line: 0, offset: 0 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), "data");
}
#[test]
fn data_uses_newlines_and_spaces_to_represent_structure() {
let terminal = Box::new(TestTerminal::new());
// Setting a non-zero x coordinate on a previous line exercises column resetting.
terminal.print(&Position{ line: 0, offset: 2 }, Style::Default, Colors::Default, &"some");
terminal.print(&Position{ line: 2, offset: 5 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), " some\n\n data");
}
}
| {
for _ in last_row_with_data..y {
data.push('\n');
last_column_with_data = 0;
}
for _ in last_column_with_data..x {
data.push(' ');
}
data.push(c);
last_row_with_data = y;
// Since the column changes on each character, and we don't
// want to print a space in between every character, we
// set it ahead when we've run into a character to
// differentiate from leading spaces.
last_column_with_data = x+1;
} | conditional_block |
test_terminal.rs | use crate::input::Key;
use crate::models::application::Event;
use scribe::buffer::Position;
use std::sync::Mutex;
use super::Terminal;
use crate::view::{Colors, Style};
const WIDTH: usize = 10;
const HEIGHT: usize = 10;
// A headless terminal that tracks printed data, which can be
// returned as a String to test display logic of other types.
pub struct TestTerminal {
data: Mutex<[[Option<(char, Colors)>; WIDTH]; HEIGHT]>, // 2D array of chars to represent screen
cursor: Mutex<Option<Position>>,
key_sent: Mutex<bool>
}
impl TestTerminal {
pub fn new() -> TestTerminal {
TestTerminal {
data: Mutex::new([[None; WIDTH]; HEIGHT]),
cursor: Mutex::new(None),
key_sent: Mutex::new(false)
}
}
// Returns a String representation of the printed data.
pub fn content(&self) -> String {
let mut data = String::new();
let mut last_row_with_data = 0;
let mut last_column_with_data = 0;
for (y, row) in self.data.lock().unwrap().iter().enumerate() {
for (x, cell) in row.iter().enumerate() {
if let Some((c, _)) = *cell {
for _ in last_row_with_data..y {
data.push('\n');
last_column_with_data = 0;
}
for _ in last_column_with_data..x {
data.push(' ');
}
data.push(c);
last_row_with_data = y;
// Since the column changes on each character, and we don't
// want to print a space in between every character, we | }
}
}
data
}
}
impl Terminal for TestTerminal {
fn listen(&self) -> Option<Event> {
// This implementation will return a key once, followed by nothing.
// This allows us to test both scenarios, the latter being crucial
// to stopping the application in test mode; the input listener only
// checks for kill signals when the terminal returns no input.
let mut key_sent = self.key_sent.lock().unwrap();
if *key_sent {
None
} else {
*key_sent = true;
Some(Event::Key(Key::Char('A')))
}
}
fn clear(&self) {
for row in self.data.lock().unwrap().iter_mut() {
*row = [None; WIDTH];
}
}
fn present(&self) { }
fn width(&self) -> usize { WIDTH }
fn height(&self) -> usize { HEIGHT }
fn set_cursor(&self, position: Option<Position>) {
let mut cursor = self.cursor.lock().unwrap();
*cursor = position;
}
fn suspend(&self) { }
fn print(&self, position: &Position, _: Style, colors: Colors, content: &str) {
// Ignore lines beyond visible height.
if position.line >= self.height() { return; }
let mut data = self.data.lock().unwrap();
let string_content = format!("{}", content);
for (i, c) in string_content.chars().enumerate() {
// Ignore characters beyond visible width.
if i+position.offset >= WIDTH { break; }
data[position.line][i+position.offset] = Some((c, colors));
}
}
}
#[cfg(test)]
mod tests {
use crate::view::terminal::Terminal;
use super::TestTerminal;
use crate::view::{Colors, Style};
use scribe::buffer::Position;
#[test]
fn print_sets_terminal_data_correctly() {
let terminal = Box::new(TestTerminal::new());
terminal.print(&Position{ line: 0, offset: 0 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), "data");
}
#[test]
fn data_uses_newlines_and_spaces_to_represent_structure() {
let terminal = Box::new(TestTerminal::new());
// Setting a non-zero x coordinate on a previous line exercises column resetting.
terminal.print(&Position{ line: 0, offset: 2 }, Style::Default, Colors::Default, &"some");
terminal.print(&Position{ line: 2, offset: 5 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), " some\n\n data");
}
} | // set it ahead when we've run into a character to
// differentiate from leading spaces.
last_column_with_data = x+1; | random_line_split |
test_terminal.rs | use crate::input::Key;
use crate::models::application::Event;
use scribe::buffer::Position;
use std::sync::Mutex;
use super::Terminal;
use crate::view::{Colors, Style};
const WIDTH: usize = 10;
const HEIGHT: usize = 10;
// A headless terminal that tracks printed data, which can be
// returned as a String to test display logic of other types.
pub struct TestTerminal {
data: Mutex<[[Option<(char, Colors)>; WIDTH]; HEIGHT]>, // 2D array of chars to represent screen
cursor: Mutex<Option<Position>>,
key_sent: Mutex<bool>
}
impl TestTerminal {
pub fn new() -> TestTerminal {
TestTerminal {
data: Mutex::new([[None; WIDTH]; HEIGHT]),
cursor: Mutex::new(None),
key_sent: Mutex::new(false)
}
}
// Returns a String representation of the printed data.
pub fn content(&self) -> String {
let mut data = String::new();
let mut last_row_with_data = 0;
let mut last_column_with_data = 0;
for (y, row) in self.data.lock().unwrap().iter().enumerate() {
for (x, cell) in row.iter().enumerate() {
if let Some((c, _)) = *cell {
for _ in last_row_with_data..y {
data.push('\n');
last_column_with_data = 0;
}
for _ in last_column_with_data..x {
data.push(' ');
}
data.push(c);
last_row_with_data = y;
// Since the column changes on each character, and we don't
// want to print a space in between every character, we
// set it ahead when we've run into a character to
// differentiate from leading spaces.
last_column_with_data = x+1;
}
}
}
data
}
}
impl Terminal for TestTerminal {
fn listen(&self) -> Option<Event> {
// This implementation will return a key once, followed by nothing.
// This allows us to test both scenarios, the latter being crucial
// to stopping the application in test mode; the input listener only
// checks for kill signals when the terminal returns no input.
let mut key_sent = self.key_sent.lock().unwrap();
if *key_sent {
None
} else {
*key_sent = true;
Some(Event::Key(Key::Char('A')))
}
}
fn clear(&self) |
fn present(&self) { }
fn width(&self) -> usize { WIDTH }
fn height(&self) -> usize { HEIGHT }
fn set_cursor(&self, position: Option<Position>) {
let mut cursor = self.cursor.lock().unwrap();
*cursor = position;
}
fn suspend(&self) { }
fn print(&self, position: &Position, _: Style, colors: Colors, content: &str) {
// Ignore lines beyond visible height.
if position.line >= self.height() { return; }
let mut data = self.data.lock().unwrap();
let string_content = format!("{}", content);
for (i, c) in string_content.chars().enumerate() {
// Ignore characters beyond visible width.
if i+position.offset >= WIDTH { break; }
data[position.line][i+position.offset] = Some((c, colors));
}
}
}
#[cfg(test)]
mod tests {
use crate::view::terminal::Terminal;
use super::TestTerminal;
use crate::view::{Colors, Style};
use scribe::buffer::Position;
#[test]
fn print_sets_terminal_data_correctly() {
let terminal = Box::new(TestTerminal::new());
terminal.print(&Position{ line: 0, offset: 0 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), "data");
}
#[test]
fn data_uses_newlines_and_spaces_to_represent_structure() {
let terminal = Box::new(TestTerminal::new());
// Setting a non-zero x coordinate on a previous line exercises column resetting.
terminal.print(&Position{ line: 0, offset: 2 }, Style::Default, Colors::Default, &"some");
terminal.print(&Position{ line: 2, offset: 5 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), " some\n\n data");
}
}
| {
for row in self.data.lock().unwrap().iter_mut() {
*row = [None; WIDTH];
}
} | identifier_body |
test_terminal.rs | use crate::input::Key;
use crate::models::application::Event;
use scribe::buffer::Position;
use std::sync::Mutex;
use super::Terminal;
use crate::view::{Colors, Style};
const WIDTH: usize = 10;
const HEIGHT: usize = 10;
// A headless terminal that tracks printed data, which can be
// returned as a String to test display logic of other types.
pub struct TestTerminal {
data: Mutex<[[Option<(char, Colors)>; WIDTH]; HEIGHT]>, // 2D array of chars to represent screen
cursor: Mutex<Option<Position>>,
key_sent: Mutex<bool>
}
impl TestTerminal {
pub fn new() -> TestTerminal {
TestTerminal {
data: Mutex::new([[None; WIDTH]; HEIGHT]),
cursor: Mutex::new(None),
key_sent: Mutex::new(false)
}
}
// Returns a String representation of the printed data.
pub fn content(&self) -> String {
let mut data = String::new();
let mut last_row_with_data = 0;
let mut last_column_with_data = 0;
for (y, row) in self.data.lock().unwrap().iter().enumerate() {
for (x, cell) in row.iter().enumerate() {
if let Some((c, _)) = *cell {
for _ in last_row_with_data..y {
data.push('\n');
last_column_with_data = 0;
}
for _ in last_column_with_data..x {
data.push(' ');
}
data.push(c);
last_row_with_data = y;
// Since the column changes on each character, and we don't
// want to print a space in between every character, we
// set it ahead when we've run into a character to
// differentiate from leading spaces.
last_column_with_data = x+1;
}
}
}
data
}
}
impl Terminal for TestTerminal {
fn listen(&self) -> Option<Event> {
// This implementation will return a key once, followed by nothing.
// This allows us to test both scenarios, the latter being crucial
// to stopping the application in test mode; the input listener only
// checks for kill signals when the terminal returns no input.
let mut key_sent = self.key_sent.lock().unwrap();
if *key_sent {
None
} else {
*key_sent = true;
Some(Event::Key(Key::Char('A')))
}
}
fn clear(&self) {
for row in self.data.lock().unwrap().iter_mut() {
*row = [None; WIDTH];
}
}
fn present(&self) { }
fn width(&self) -> usize { WIDTH }
fn height(&self) -> usize { HEIGHT }
fn set_cursor(&self, position: Option<Position>) {
let mut cursor = self.cursor.lock().unwrap();
*cursor = position;
}
fn suspend(&self) { }
fn print(&self, position: &Position, _: Style, colors: Colors, content: &str) {
// Ignore lines beyond visible height.
if position.line >= self.height() { return; }
let mut data = self.data.lock().unwrap();
let string_content = format!("{}", content);
for (i, c) in string_content.chars().enumerate() {
// Ignore characters beyond visible width.
if i+position.offset >= WIDTH { break; }
data[position.line][i+position.offset] = Some((c, colors));
}
}
}
#[cfg(test)]
mod tests {
use crate::view::terminal::Terminal;
use super::TestTerminal;
use crate::view::{Colors, Style};
use scribe::buffer::Position;
#[test]
fn print_sets_terminal_data_correctly() {
let terminal = Box::new(TestTerminal::new());
terminal.print(&Position{ line: 0, offset: 0 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), "data");
}
#[test]
fn | () {
let terminal = Box::new(TestTerminal::new());
// Setting a non-zero x coordinate on a previous line exercises column resetting.
terminal.print(&Position{ line: 0, offset: 2 }, Style::Default, Colors::Default, &"some");
terminal.print(&Position{ line: 2, offset: 5 }, Style::Default, Colors::Default, &"data");
assert_eq!(terminal.content(), " some\n\n data");
}
}
| data_uses_newlines_and_spaces_to_represent_structure | identifier_name |
rw_lock.rs | use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::fmt;
use core::default::Default;
use util::cpu_relax;
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
///
/// Based on
/// https://jfdube.wordpress.com/2014/01/03/implementing-a-recursive-read-write-spinlock/
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T:?Sized>
{
lock: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard to which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a +?Sized>
{
lock: &'a AtomicUsize,
data: &'a T,
}
/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a +?Sized>
{
lock: &'a AtomicUsize,
data: &'a mut T,
}
// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T:?Sized + Send + Sync> Send for RwLock<T> {}
unsafe impl<T:?Sized + Send + Sync> Sync for RwLock<T> {}
const USIZE_MSB: usize = ::core::isize::MIN as usize;
impl<T> RwLock<T>
{
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
///
/// ```
/// #![feature(const_fn)]
/// use spin;
///
/// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
///
/// fn demo() {
/// let lock = RW_LOCK.read();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline]
#[cfg(feature = "const_fn")]
pub const fn new(user_data: T) -> RwLock<T>
{
RwLock
{
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Creates a new spinlock wrapping the supplied data.
///
/// If you want to use it statically, you can use the `const_fn` feature.
///
/// ```
/// use spin;
///
/// fn demo() {
/// let rw_lock = spin::RwLock::new(());
/// let lock = rw_lock.read();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline]
#[cfg(not(feature = "const_fn"))]
pub fn new(user_data: T) -> RwLock<T>
{
RwLock
{
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Consumes this `RwLock`, returning the underlying data.
pub fn into_inner(self) -> T
{
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let RwLock { data,.. } = self;
data.into_inner()
}
}
impl<T:?Sized> RwLock<T>
{
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which
/// hold the lock. There may be other readers currently inside the lock when
/// this method returns. This method does not provide any guarantees with
/// respect to the ordering of whether contentious readers or writers will
/// acquire the lock first.
///
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.read();
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
{
// (funny do-while loop)
while {
// Old value, with write bit unset
let mut old;
// Wait for for writer to go away before doing expensive atomic ops
// (funny do-while loop)
while {
old = self.lock.load(Ordering::Relaxed);
old & USIZE_MSB!= 0
} {
cpu_relax();
}
// unset write bit
old &=!USIZE_MSB;
let new = old + 1;
debug_assert!(new!= (!USIZE_MSB) & (!0));
self.lock.compare_and_swap(old, new, Ordering::SeqCst)!= old
} {
cpu_relax();
}
RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
}
}
/// Attempt to acquire this lock with shared read access.
///
/// This function will never block and will return immediately if `read`
/// would otherwise succeed. Returns `Some` of an RAII guard which will
/// release the shared access of this thread when dropped, or `None` if the
/// access could not be granted. This method does not provide any
/// guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_read() {
/// Some(data) => {
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
{
// Old value, with write bit unset
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
let new = old + 1;
debug_assert!(new!= (!USIZE_MSB) & (!0));
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
Some(RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
})
} else {
None
}
}
/// Lock this rwlock with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// This function will not return while other writers or other readers
/// currently have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this rwlock
/// when dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.write();
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
{
loop
{
// Old value, with write bit unset.
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
// Old value, with write bit set.
let new = USIZE_MSB | old;
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
// Wait for readers to go away, then lock is ours.
while self.lock.load(Ordering::Relaxed)!= USIZE_MSB {
cpu_relax();
}
break
}
}
RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
/// Attempt to lock this rwlock with exclusive write access.
///
/// This function does not ever block, and it will return `None` if a call
/// to `write` would otherwise block. If successful, an RAII guard is
/// returned.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_write() {
/// Some(mut data) => {
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is implicitly dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
{
if self.lock.compare_and_swap(0,
USIZE_MSB,
Ordering::SeqCst) == 0
{
Some(RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for RwLock<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_read()
{
Some(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
None => write!(f, "RwLock {{ <locked> }}"),
}
}
}
impl<T:?Sized + Default> Default for RwLock<T> {
fn default() -> RwLock<T> {
RwLock::new(Default::default())
}
}
impl<'rwlock, T:?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T:?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T:?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T { self.data }
}
impl<'rwlock, T:?Sized> Drop for RwLockReadGuard<'rwlock, T> {
fn drop(&mut self) |
}
impl<'rwlock, T:?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
self.lock.store(0, Ordering::Relaxed);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop((l.read(), l.read()));
drop(l.write());
}
// TODO: needs RNG
//#[test]
//fn frob() {
// static R: RwLock = RwLock::new();
// const N: usize = 10;
// const M: usize = 1000;
//
// let (tx, rx) = channel::<()>();
// for _ in 0..N {
// let tx = tx.clone();
// thread::spawn(move|| {
// let mut rng = rand::thread_rng();
// for _ in 0..M {
// if rng.gen_weighted_bool(N) {
// drop(R.write());
// } else {
// drop(R.read());
// }
// }
// drop(tx);
// });
// }
// drop(tx);
// let _ = rx.recv();
// unsafe { R.destroy(); }
//}
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move|| {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move|| {
let lock = arc3.read();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[test]
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_write() {
use std::mem::drop;
let lock = RwLock::new(0isize);
let read_guard = lock.read();
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
}
drop(read_guard);
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
}
| {
debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
self.lock.fetch_sub(1, Ordering::SeqCst);
} | identifier_body |
rw_lock.rs | use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::fmt;
use core::default::Default;
use util::cpu_relax;
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
///
/// Based on
/// https://jfdube.wordpress.com/2014/01/03/implementing-a-recursive-read-write-spinlock/
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T:?Sized>
{
lock: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard to which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a +?Sized>
{
lock: &'a AtomicUsize,
data: &'a T,
}
/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a +?Sized>
{
lock: &'a AtomicUsize,
data: &'a mut T,
}
// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T:?Sized + Send + Sync> Send for RwLock<T> {}
unsafe impl<T:?Sized + Send + Sync> Sync for RwLock<T> {}
const USIZE_MSB: usize = ::core::isize::MIN as usize;
impl<T> RwLock<T>
{
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
///
/// ```
/// #![feature(const_fn)]
/// use spin;
///
/// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
///
/// fn demo() {
/// let lock = RW_LOCK.read();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline]
#[cfg(feature = "const_fn")]
pub const fn new(user_data: T) -> RwLock<T>
{
RwLock
{
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Creates a new spinlock wrapping the supplied data.
///
/// If you want to use it statically, you can use the `const_fn` feature.
///
/// ```
/// use spin;
///
/// fn demo() {
/// let rw_lock = spin::RwLock::new(());
/// let lock = rw_lock.read();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline]
#[cfg(not(feature = "const_fn"))]
pub fn new(user_data: T) -> RwLock<T>
{
RwLock
{
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Consumes this `RwLock`, returning the underlying data.
pub fn into_inner(self) -> T
{
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let RwLock { data,.. } = self;
data.into_inner()
}
}
impl<T:?Sized> RwLock<T>
{
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which
/// hold the lock. There may be other readers currently inside the lock when
/// this method returns. This method does not provide any guarantees with
/// respect to the ordering of whether contentious readers or writers will
/// acquire the lock first.
///
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.read();
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
{
// (funny do-while loop)
while {
// Old value, with write bit unset
let mut old;
// Wait for for writer to go away before doing expensive atomic ops
// (funny do-while loop)
while {
old = self.lock.load(Ordering::Relaxed);
old & USIZE_MSB!= 0
} {
cpu_relax();
}
// unset write bit
old &=!USIZE_MSB;
let new = old + 1;
debug_assert!(new!= (!USIZE_MSB) & (!0));
self.lock.compare_and_swap(old, new, Ordering::SeqCst)!= old
} {
cpu_relax();
}
RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
}
}
/// Attempt to acquire this lock with shared read access.
///
/// This function will never block and will return immediately if `read`
/// would otherwise succeed. Returns `Some` of an RAII guard which will
/// release the shared access of this thread when dropped, or `None` if the
/// access could not be granted. This method does not provide any
/// guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_read() {
/// Some(data) => {
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
{
// Old value, with write bit unset
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
let new = old + 1;
debug_assert!(new!= (!USIZE_MSB) & (!0));
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
Some(RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
})
} else {
None
}
}
/// Lock this rwlock with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// This function will not return while other writers or other readers
/// currently have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this rwlock
/// when dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.write();
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
{
loop
{
// Old value, with write bit unset.
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
// Old value, with write bit set.
let new = USIZE_MSB | old;
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
// Wait for readers to go away, then lock is ours.
while self.lock.load(Ordering::Relaxed)!= USIZE_MSB {
cpu_relax();
}
break
}
}
RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
/// Attempt to lock this rwlock with exclusive write access.
///
/// This function does not ever block, and it will return `None` if a call
/// to `write` would otherwise block. If successful, an RAII guard is
/// returned.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_write() {
/// Some(mut data) => {
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is implicitly dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
{
if self.lock.compare_and_swap(0,
USIZE_MSB,
Ordering::SeqCst) == 0
{
Some(RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
}
impl<T:?Sized + fmt::Debug> fmt::Debug for RwLock<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_read()
{
Some(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard),
None => write!(f, "RwLock {{ <locked> }}"),
}
}
}
impl<T:?Sized + Default> Default for RwLock<T> {
fn default() -> RwLock<T> {
RwLock::new(Default::default())
}
}
impl<'rwlock, T:?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T:?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T:?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T { self.data }
}
impl<'rwlock, T:?Sized> Drop for RwLockReadGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
self.lock.fetch_sub(1, Ordering::SeqCst);
}
}
impl<'rwlock, T:?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
self.lock.store(0, Ordering::Relaxed);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop((l.read(), l.read()));
drop(l.write());
}
// TODO: needs RNG
//#[test]
//fn frob() {
// static R: RwLock = RwLock::new();
// const N: usize = 10;
// const M: usize = 1000;
//
// let (tx, rx) = channel::<()>();
// for _ in 0..N {
// let tx = tx.clone();
// thread::spawn(move|| {
// let mut rng = rand::thread_rng();
// for _ in 0..M {
// if rng.gen_weighted_bool(N) {
// drop(R.write());
// } else {
// drop(R.read());
// }
// }
// drop(tx);
// });
// }
// drop(tx);
// let _ = rx.recv();
// unsafe { R.destroy(); }
//}
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move|| {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move|| {
let lock = arc3.read(); | }
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[test]
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_write() {
use std::mem::drop;
let lock = RwLock::new(0isize);
let read_guard = lock.read();
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
}
drop(read_guard);
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
} | assert!(*lock >= 0);
})); | random_line_split |
rw_lock.rs | use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::fmt;
use core::default::Default;
use util::cpu_relax;
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
///
/// Based on
/// https://jfdube.wordpress.com/2014/01/03/implementing-a-recursive-read-write-spinlock/
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized>
{
    // Lock state word: the most significant bit (USIZE_MSB) is set while a
    // writer holds or is acquiring the lock; the low bits count active readers.
    lock: AtomicUsize,
    // The protected value; UnsafeCell permits mutation behind a shared `&self`.
    data: UnsafeCell<T>,
}
/// A guard to which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
{
    // Borrow of the owning lock's state word; used to decrement the reader
    // count when the guard is dropped.
    lock: &'a AtomicUsize,
    // Shared borrow of the protected data, valid for the guard's lifetime.
    data: &'a T,
}
/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized>
{
    // Borrow of the owning lock's state word; cleared when the guard drops.
    lock: &'a AtomicUsize,
    // Exclusive borrow of the protected data, valid for the guard's lifetime.
    data: &'a mut T,
}
// Same unsafe impls as `std::sync::RwLock`
// SAFETY: mirrors the bounds of `std::sync::RwLock` — access to `T` is
// serialized by the lock, so the wrapper is Send/Sync whenever `T: Send + Sync`.
unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
const USIZE_MSB: usize = ::core::isize::MIN as usize;
impl<T> RwLock<T>
{
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// #![feature(const_fn)]
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    // NOTE(review): this body must stay identical to the non-`const_fn`
    // version below; only the `const` qualifier and cfg gate differ.
    #[inline]
    #[cfg(feature = "const_fn")]
    pub const fn new(user_data: T) -> RwLock<T>
    {
        RwLock
        {
            lock: ATOMIC_USIZE_INIT,
            data: UnsafeCell::new(user_data),
        }
    }
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// If you want to use it statically, you can use the `const_fn` feature.
    ///
    /// ```
    /// use spin;
    ///
    /// fn demo() {
    ///     let rw_lock = spin::RwLock::new(());
    ///     let lock = rw_lock.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    #[cfg(not(feature = "const_fn"))]
    pub fn new(user_data: T) -> RwLock<T>
    {
        RwLock
        {
            lock: ATOMIC_USIZE_INIT,
            data: UnsafeCell::new(user_data),
        }
    }
    /// Consumes this `RwLock`, returning the underlying data.
    pub fn into_inner(self) -> T
    {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }
}
impl<T: ?Sized> RwLock<T>
{
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
    {
        // Outer retry loop: repeats until the reader-increment CAS succeeds.
        // (funny do-while loop)
        while {
            // Old value, with write bit unset
            let mut old;
            // Wait for for writer to go away before doing expensive atomic ops
            // (funny do-while loop)
            while {
                old = self.lock.load(Ordering::Relaxed);
                old & USIZE_MSB != 0
            } {
                cpu_relax();
            }
            // unset write bit
            old &= !USIZE_MSB;
            let new = old + 1;
            // The reader count must never overflow into the writer bit.
            debug_assert!(new != (!USIZE_MSB) & (!0));
            self.lock.compare_and_swap(old, new, Ordering::SeqCst) != old
        } {
            cpu_relax();
        }
        RwLockReadGuard {
            lock: &self.lock,
            data: unsafe { &*self.data.get() },
        }
    }
    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
    {
        // Old value, with write bit unset
        let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
        let new = old + 1;
        debug_assert!(new != (!USIZE_MSB) & (!0));
        // Single CAS attempt: if a writer appeared or another thread raced us,
        // the CAS fails and we report `None` instead of spinning.
        if self.lock.compare_and_swap(old,
                                      new,
                                      Ordering::SeqCst) == old
        {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { &*self.data.get() },
            })
        } else {
            None
        }
    }
    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
    {
        loop
        {
            // Old value, with write bit unset.
            let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
            // Old value, with write bit set.
            let new = USIZE_MSB | old;
            if self.lock.compare_and_swap(old,
                                          new,
                                          Ordering::SeqCst) == old
            {
                // Writer bit is set, so no new readers can enter.
                // Wait for readers to go away, then lock is ours.
                while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
                    cpu_relax();
                }
                break
            }
        }
        RwLockWriteGuard {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }
    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
    {
        // Succeeds only when the lock word is exactly 0 (no readers, no
        // writer). Unlike `write`, this never waits for readers to drain.
        if self.lock.compare_and_swap(0,
                                      USIZE_MSB,
                                      Ordering::SeqCst) == 0
        {
            Some(RwLockWriteGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T>
{
    /// Formats the lock for debugging, showing the data when a shared
    /// read lock can be obtained without blocking and a placeholder otherwise.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
    {
        if let Some(guard) = self.try_read() {
            write!(f, "RwLock {{ data: {:?} }}", &*guard)
        } else {
            write!(f, "RwLock {{ <locked> }}")
        }
    }
}
impl<T: ?Sized + Default> Default for RwLock<T> {
    /// Creates an unlocked `RwLock` containing `T`'s default value.
    fn default() -> RwLock<T> {
        RwLock::new(T::default())
    }
}
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;
    // Shared access to the protected data for the guard's lifetime.
    fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;
    // Read access through the exclusive guard.
    fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    // Mutable access to the protected data; only the write guard provides it.
    fn deref_mut(&mut self) -> &mut T { self.data }
}
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    /// Releases one shared hold on the lock by decrementing the reader count.
    fn drop(&mut self) {
        // There must be at least one reader recorded in the low bits.
        debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
        self.lock.fetch_sub(1, Ordering::SeqCst);
    }
}
impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
    /// Releases exclusive access by clearing the whole lock word.
    fn drop(&mut self) {
        // Only the writer bit may be set while a write guard is alive.
        debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
        // BUG FIX: the unlock store must use Release (not Relaxed) ordering,
        // otherwise writes performed under the lock are not guaranteed to be
        // visible to the next thread that acquires it.
        self.lock.store(0, Ordering::Release);
    }
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop((l.read(), l.read()));
drop(l.write());
}
// TODO: needs RNG
//#[test]
//fn frob() {
// static R: RwLock = RwLock::new();
// const N: usize = 10;
// const M: usize = 1000;
//
// let (tx, rx) = channel::<()>();
// for _ in 0..N {
// let tx = tx.clone();
// thread::spawn(move|| {
// let mut rng = rand::thread_rng();
// for _ in 0..M {
// if rng.gen_weighted_bool(N) {
// drop(R.write());
// } else {
// drop(R.read());
// }
// }
// drop(tx);
// });
// }
// drop(tx);
// let _ = rx.recv();
// unsafe { R.destroy(); }
//}
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move|| {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move|| {
let lock = arc3.read();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[test]
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_write() {
use std::mem::drop;
let lock = RwLock::new(0isize);
let read_guard = lock.read();
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
}
drop(read_guard);
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn | (&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
}
| drop | identifier_name |
root.rs | /*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::f64;
use std::cmp::Ordering;
use primitives::label::Label;
use primitives::bbox::BBox;
///
/// Represent the possible split dimensions.
///
#[derive(PartialEq)]
enum SplitDimension {
    // The node splits its children along the x coordinate.
    X,
    // The node splits its children along the y coordinate.
    Y,
    // Leaf node: no split dimension assigned yet.
    UNDEF,
}
///
/// The struct defines a tree node.
///
/// The tree nodes members are the labels t value, the label itself, the split type (X, Y or UNDEF
/// in case the node is a leaf node).
///
/// The split value indicates the maximum value of the left children in the corresponding
/// dimension. The split value is guaranteed to be less than the corresponding coordinate of the
/// right children.
///
/// Left and right child are some indices, if there is a left or right subtree and none otherwise.
pub struct Root {
    // Elimination time of the stored label (cached from m_data).
    m_t: f64,
    // The label stored at this tree node.
    m_data: Label,
    // Split dimension of this node; UNDEF marks an unlinked leaf.
    m_type: SplitDimension,
    // Maximum coordinate (in the split dimension) of the left subtree;
    // NaN while the node is not yet linked into a tree.
    m_split: f64,
    // Index of the left child in the backing Vec<Root>, if any.
    m_left_child: Option<usize>,
    // Index of the right child in the backing Vec<Root>, if any.
    m_right_child: Option<usize>,
}
impl Root {
    ///
    /// Construct a new root from the given label
    ///
    /// Note: The function only contains the given label. No subtrees or connections to other
    /// tree nodes are constructed.
    ///
    /// To construct a single tree from a forest of root nodes use the Root::init_pst3d(...)
    /// function.
    ///
    pub fn new(l: Label) -> Root {
        Root {
            m_t: l.get_t(),
            m_data: l,
            // A fresh node is a leaf until linked by create_root.
            m_type: SplitDimension::UNDEF,
            m_split: f64::NAN,
            m_left_child: None,
            m_right_child: None,
        }
    }
    ///
    /// Initialize a single 3D PST out of a forest of root nodes and return the root node index.
    ///
    /// The function will mutate the given root nodes and set the corresponding split type, split
    /// value and left and right subtree indices.
    ///
    /// The function returns the index of the root node in the data array.
    ///
    pub fn init_pst3d(mut data: &mut Vec<Root>) -> Option<usize> {
        let mut refs: Vec<RootRef> = Vec::with_capacity(data.len());
        // Sort descending by t (ascending sort, then reverse) so that index 0
        // holds the largest elimination time before the refs are created.
        data.sort_by(|first, second| if first.m_t < second.m_t {
            Ordering::Less
        } else if first.m_t > second.m_t {
            Ordering::Greater
        } else {
            Ordering::Equal
        });
        data.reverse();
        for (idx, d) in data.iter().enumerate() {
            refs.push(RootRef::new(d, idx));
        }
        // The root level splits along x; dimensions alternate below.
        let initial_dimension = SplitDimension::X;
        create_root(refs, &mut data, &initial_dimension)
    }
    ///
    /// Get a vector of references to the elements in the 3d PST with t >= min_t and that are
    /// contained in bbox.
    ///
    pub fn get<'a>(&'a self, bbox: &BBox, min_t: f64, data: &'a Vec<Root>) -> Vec<&'a Label> {
        let mut r: Vec<&Label> = Vec::new();
        // create_root always picks the max-t element as subtree root, so every
        // descendant has a smaller t and the whole subtree can be pruned here.
        if self.m_t <= min_t {
            return r;
        }
        if bbox.is_contained(&self.m_data) {
            r.push(&self.m_data);
        }
        // append the left child if it exists and is cut by the bounding box
        if let Some(idx) = self.m_left_child {
            let append = match self.m_type {
                SplitDimension::X => bbox.get_min_x() <= self.m_split,
                SplitDimension::Y => bbox.get_min_y() <= self.m_split,
                SplitDimension::UNDEF => false,
            };
            if append {
                assert!(idx < data.len());
                let mut res = data[idx].get(&bbox, min_t, &data);
                r.append(&mut res);
            }
        }
        // append the right child if it exists and is cut by the bounding box
        if let Some(idx) = self.m_right_child {
            let append = match self.m_type {
                SplitDimension::X => bbox.get_max_x() > self.m_split,
                SplitDimension::Y => bbox.get_max_y() > self.m_split,
                SplitDimension::UNDEF => false,
            };
            if append {
                assert!(idx < data.len());
                let mut res = data[idx].get(&bbox, min_t, &data);
                r.append(&mut res);
            }
        }
        r
    }
    ///
    /// Get a human readable string representation of the tree rooted at self.
    ///
    /// A such string will look like:
    ///
    /// ```text
    /// x-node (split: 4.5): Label [#1]: 'T1' at (1, 2) with prio 1,elim-t: 9 and label factor: \
    /// 1.5
    ///  l y-node (split: 4.5): Label [#2]: 'T2' at (2, 3) with prio 1, elim-t: 8 and label \
    /// factor: 1.5
    ///   l x-node (split: NaN): Label [#3]: 'T3' at (3, 4) with prio 1, elim-t: 7 and \
    /// label factor: 1.5
    /// ```
    ///
    pub fn to_string(&self, level: i32, data: &Vec<Root>) -> String {
        // prefix is level x p
        let p = " ";
        let mut prefix = String::new();
        for _ in 0..level {
            prefix = format!("{}{}", prefix, p);
        }
        let mut result = match self.m_type {
            SplitDimension::X => {
                format!("{}x-node (split: {}): {}",
                        prefix,
                        self.m_split,
                        self.m_data.to_string())
            }
            SplitDimension::Y => {
                format!("{}y-node (split: {}): {}",
                        prefix,
                        self.m_split,
                        self.m_data.to_string())
            }
            SplitDimension::UNDEF => {
                format!("{}leaf-node (split: {}): {}",
                        prefix,
                        self.m_split,
                        self.m_data.to_string())
            }
        };
        // append the left subtree
        if let Some(idx) = self.m_left_child {
            assert!(idx < data.len());
            result = format!("{}\nl{}", result, data[idx].to_string(level + 1, &data));
        }
        // append the right subtree
        if let Some(idx) = self.m_right_child {
            assert!(idx < data.len());
            result = format!("{}\nr{}", result, data[idx].to_string(level + 1, &data));
        }
        result
    }
}
///
/// The struct represents a reference to a root node and contains all the information required to
/// construct the 3D PST.
///
#[derive(Debug)]
struct RootRef {
    // Cached x coordinate of the referenced root's label.
    m_x: f64,
    // Cached y coordinate of the referenced root's label.
    m_y: f64,
    // Cached elimination time of the referenced root's label.
    m_t: f64,
    // Index of the referenced Root in the backing data vector.
    m_idx: usize,
}
impl RootRef {
    ///
    /// Initialize a new root ref caching the coordinates and elimination time
    /// of the root `r`, which is stored at index `idx` in the data vector.
    ///
    fn new(r: &Root, idx: usize) -> RootRef {
        RootRef {
            m_t: r.m_data.get_t(),
            m_x: r.m_data.get_x(),
            m_y: r.m_data.get_y(),
            m_idx: idx,
        }
    }
    ///
    /// Compare two Root refs with respect to the t value.
    ///
    /// Uses `partial_cmp`; incomparable values (NaN) compare as equal, which
    /// matches the previous hand-rolled three-way comparison.
    ///
    fn order_by_t(first: &Self, second: &Self) -> Ordering {
        first.m_t.partial_cmp(&second.m_t).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the x value.
    ///
    fn order_by_x(first: &Self, second: &Self) -> Ordering {
        first.m_x.partial_cmp(&second.m_x).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the y value.
    ///
    fn order_by_y(first: &Self, second: &Self) -> Ordering {
        first.m_y.partial_cmp(&second.m_y).unwrap_or(Ordering::Equal)
    }
}
///
/// In the RootRef vector find the index of the root with the maximum t value.
///
///
/// In the RootRef vector find the index of the root with the maximum t value.
///
/// The matching RootRef is removed from `refs` (via swap_remove, O(1)) and the
/// data-vector index it stores is returned. Ties keep the first occurrence.
///
fn find_root_idx(refs: &mut Vec<RootRef>) -> usize {
    // Scan once, remembering position and value of the strict maximum.
    let (best_pos, best_t) = refs.iter().enumerate().fold(
        (0, 0.),
        |acc, (pos, e)| if e.m_t > acc.1 { (pos, e.m_t) } else { acc },
    );
    let winner = refs.swap_remove(best_pos);
    assert!(winner.m_t == best_t);
    winner.m_idx
}
///
/// From the given RootRef vector construct the subtree and update the corresponding root nodes in
/// the data vector.
///
/// The element with the maximum t value will be set as root with the corresponding split
/// dimension. The remaining elements will sorted by the split dimension. The split value is the
/// corresponding coordinate of item floor(|root_refs| / 2) and the elements are splitted into <=
/// and >.
///
/// From the <= elements the left subtree is constructed recursively with swapped split dimension.
/// Same for the > elements as the right subtree.
///
/// For the nodes in data that are referenced by RootRefs in root_refs the corresponding Roots are
/// updated accordingly.
///
///
/// From the given RootRef vector construct the subtree and update the corresponding root nodes in
/// the data vector.
///
/// The element with the maximum t value will be set as root with the corresponding split
/// dimension. The remaining elements will be sorted by the split dimension. The split value is
/// the corresponding coordinate of item floor(|root_refs| / 2) and the elements are split into
/// <= and >.
///
/// From the <= elements the left subtree is constructed recursively with swapped split dimension.
/// Same for the > elements as the right subtree.
///
/// For the nodes in data that are referenced by RootRefs in root_refs the corresponding Roots are
/// updated accordingly.
///
fn create_root(mut root_refs: Vec<RootRef>,
               mut data: &mut Vec<Root>,
               dim: &SplitDimension)
               -> Option<usize> {
    if root_refs.is_empty() {
        return None;
    }
    let size1 = root_refs.len();
    // The caller must supply a concrete split dimension.
    assert!(*dim != SplitDimension::UNDEF);
    let is_x = *dim == SplitDimension::X;
    // find the element with the maximum t value, remove the corresponding RootRef
    let root_idx = find_root_idx(&mut root_refs);
    // the sub dimension flips from X to Y or from Y to X
    let sub_dim = if is_x {
        SplitDimension::Y
    } else {
        SplitDimension::X
    };
    let mut split_value = f64::NAN;
    let mut left_child_idx: Option<usize> = None;
    let mut right_child_idx: Option<usize> = None;
    let order_asc = if is_x {
        RootRef::order_by_x
    } else {
        RootRef::order_by_y
    };
    if root_refs.len() == 1 {
        // A single remaining element always becomes the left child.
        split_value = if is_x {
            root_refs[0].m_x
        } else {
            root_refs[0].m_y
        };
        left_child_idx = create_root(root_refs, &mut data, &sub_dim);
    } else if root_refs.len() > 1 {
        root_refs.sort_by(order_asc);
        // take the x value of the median element as the new split value
        let mut median_idx = root_refs.len() / 2 - 1;
        split_value = if is_x {
            root_refs[median_idx].m_x
        } else {
            root_refs[median_idx].m_y
        };
        // ensure that the right children really have a value > m_split by
        // advancing past duplicates of the split coordinate
        if is_x {
            while median_idx < root_refs.len() && root_refs[median_idx].m_x == split_value {
                median_idx = median_idx + 1;
            }
        } else {
            while median_idx < root_refs.len() && root_refs[median_idx].m_y == split_value {
                median_idx = median_idx + 1;
            }
        }
        let size2 = root_refs.len();
        assert!(size1 == size2 + 1);
        // split the data at the median point:
        let last = root_refs.split_off(median_idx);
        assert!(size2 == root_refs.len() + last.len());
        left_child_idx = create_root(root_refs, &mut data, &sub_dim);
        right_child_idx = create_root(last, &mut data, &sub_dim);
    }
    // Write the computed topology back into the node picked as subtree root.
    let r = data.get_mut(root_idx)
        .expect("Trying to access element at not existing vector position");
    r.m_type = if is_x {
        SplitDimension::X
    } else {
        SplitDimension::Y
    };
    r.m_split = split_value;
    r.m_left_child = left_child_idx;
    r.m_right_child = right_child_idx;
    Some(root_idx)
}
#[test]
#[test]
// A freshly constructed Root caches the label's t value and starts as a leaf.
fn test_root_new() {
    let r = Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string()));
    assert!(r.m_t == 9.);
    assert!(*r.m_data.get_label() == "A".to_string());
    assert!(r.m_type == SplitDimension::UNDEF);
}
#[test]
fn test_pst_init() {
let mut f: Vec<Root> = Vec::new();
f.push(Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string())));
f.push(Root::new(Label::new(2., 3., 8., 2, 1, 1.5, "B".to_string())));
f.push(Root::new(Label::new(3., 4., 7., 3, 1, 1.5, "C".to_string())));
let root = Root::init_pst3d(&mut f);
let root_idx = root.unwrap();
println!("{}", f[root_idx].to_string(0, &f));
assert!(root_idx == 0);
assert!(f[root_idx].m_type == SplitDimension::X);
assert!(f[root_idx].m_left_child.is_some());
assert!(f[root_idx].m_right_child.is_some());
assert!(f[root_idx].m_left_child.unwrap() == 1);
assert!(f[root_idx].m_right_child.unwrap() == 2);
} | random_line_split |
|
root.rs | /*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::f64;
use std::cmp::Ordering;
use primitives::label::Label;
use primitives::bbox::BBox;
///
/// Represent the possible split dimensions.
///
#[derive(PartialEq)]
enum SplitDimension {
    // The node splits its children along the x coordinate.
    X,
    // The node splits its children along the y coordinate.
    Y,
    // Leaf node: no split dimension assigned yet.
    UNDEF,
}
///
/// The struct defines a tree node.
///
/// The tree nodes members are the labels t value, the label itself, the split type (X, Y or UNDEF
/// in case the node is a leaf node).
///
/// The split value indicates the maximum value of the left children in the corresponding
/// dimension. The split value is guaranteed to be less than the corresponding coordinate of the
/// right children.
///
/// Left and right child are some indices, if there is a left or right subtree and none otherwise.
pub struct Root {
    // Elimination time of the stored label (cached from m_data).
    m_t: f64,
    // The label stored at this tree node.
    m_data: Label,
    // Split dimension of this node; UNDEF marks an unlinked leaf.
    m_type: SplitDimension,
    // Maximum coordinate (in the split dimension) of the left subtree;
    // NaN while the node is not yet linked into a tree.
    m_split: f64,
    // Index of the left child in the backing Vec<Root>, if any.
    m_left_child: Option<usize>,
    // Index of the right child in the backing Vec<Root>, if any.
    m_right_child: Option<usize>,
}
impl Root {
///
/// Construct a new root from the given label
///
/// Note: The function only contains the given label. No subtrees of connenctions to other
/// tree nodes are constructed.
///
/// To construct a single tree from a forest of root nodes use the Root::init_pst3d(...)
/// function.
///
pub fn new(l: Label) -> Root {
Root {
m_t: l.get_t(),
m_data: l,
m_type: SplitDimension::UNDEF,
m_split: f64::NAN,
m_left_child: None,
m_right_child: None,
}
}
///
/// Initialize a single 3D PST out of a forest of root nodes and return the root node index.
///
/// The function will mutate the given root nodes and set the corresponding split type, split
/// value and left and right subtree indices.
///
/// The function returns the index of the root node in the data array.
///
pub fn init_pst3d(mut data: &mut Vec<Root>) -> Option<usize> {
let mut refs: Vec<RootRef> = Vec::with_capacity(data.len());
data.sort_by(|first, second| if first.m_t < second.m_t {
Ordering::Less
} else if first.m_t > second.m_t {
Ordering::Greater
} else {
Ordering::Equal
});
data.reverse();
for (idx, d) in data.iter().enumerate() {
refs.push(RootRef::new(d, idx));
}
let initial_dimension = SplitDimension::X;
create_root(refs, &mut data, &initial_dimension)
}
///
/// Get a vector of references to the elements in the 3d PST with t >= min_t and that are
/// contained in bbox.
///
pub fn get<'a>(&'a self, bbox: &BBox, min_t: f64, data: &'a Vec<Root>) -> Vec<&'a Label> {
let mut r: Vec<&Label> = Vec::new();
if self.m_t <= min_t {
return r;
}
if bbox.is_contained(&self.m_data) {
r.push(&self.m_data);
}
// append the left child if it exists and is cut by the bounding box
if let Some(idx) = self.m_left_child {
let append = match self.m_type {
SplitDimension::X => bbox.get_min_x() <= self.m_split,
SplitDimension::Y => bbox.get_min_y() <= self.m_split,
SplitDimension::UNDEF => false,
};
if append {
assert!(idx < data.len());
let mut res = data[idx].get(&bbox, min_t, &data);
r.append(&mut res);
}
}
// append the right child if it exists and is cut by the bounding box
if let Some(idx) = self.m_right_child {
let append = match self.m_type {
SplitDimension::X => bbox.get_max_x() > self.m_split,
SplitDimension::Y => bbox.get_max_y() > self.m_split,
SplitDimension::UNDEF => false,
};
if append {
assert!(idx < data.len());
let mut res = data[idx].get(&bbox, min_t, &data);
r.append(&mut res);
}
}
r
}
///
/// Get a human readable string representation of the tree rooted at self.
///
/// A such string will look like:
///
/// ```text
/// x-node (split: 4.5): Label [#1]: 'T1' at (1, 2) with prio 1,elim-t: 9 and label factor: \
/// 1.5
/// l y-node (split: 4.5): Label [#2]: 'T2' at (2, 3) with prio 1, elim-t: 8 and label \
/// factor: 1.5
/// l x-node (split: NaN): Label [#3]: 'T3' at (3, 4) with prio 1, elim-t: 7 and \
/// label factor: 1.5
/// ```
///
pub fn to_string(&self, level: i32, data: &Vec<Root>) -> String {
// prefix is level x p
let p = " ";
let mut prefix = String::new();
for _ in 0..level {
prefix = format!("{}{}", prefix, p);
}
let mut result = match self.m_type {
SplitDimension::X => {
format!("{}x-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
SplitDimension::Y => {
format!("{}y-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
SplitDimension::UNDEF => {
format!("{}leaf-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
};
// append the left subtree
if let Some(idx) = self.m_left_child |
// append the right subtree
if let Some(idx) = self.m_right_child {
assert!(idx < data.len());
result = format!("{}\nr{}", result, data[idx].to_string(level + 1, &data));
}
result
}
}
///
/// The struct represents a reference to a root node and contains all the information required to
/// construct the 3D PST.
///
#[derive(Debug)]
struct RootRef {
m_x: f64,
m_y: f64,
m_t: f64,
m_idx: usize,
}
impl RootRef {
    ///
    /// Initialize a new root ref caching the coordinates and elimination time
    /// of the root `r`, which is stored at index `idx` in the data vector.
    ///
    fn new(r: &Root, idx: usize) -> RootRef {
        RootRef {
            m_t: r.m_data.get_t(),
            m_x: r.m_data.get_x(),
            m_y: r.m_data.get_y(),
            m_idx: idx,
        }
    }
    ///
    /// Compare two Root refs with respect to the t value.
    ///
    /// Uses `partial_cmp`; incomparable values (NaN) compare as equal, which
    /// matches the previous hand-rolled three-way comparison.
    ///
    fn order_by_t(first: &Self, second: &Self) -> Ordering {
        first.m_t.partial_cmp(&second.m_t).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the x value.
    ///
    fn order_by_x(first: &Self, second: &Self) -> Ordering {
        first.m_x.partial_cmp(&second.m_x).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the y value.
    ///
    fn order_by_y(first: &Self, second: &Self) -> Ordering {
        first.m_y.partial_cmp(&second.m_y).unwrap_or(Ordering::Equal)
    }
}
///
/// In the RootRef vector find the index of the root with the maximum t value.
///
///
/// In the RootRef vector find the index of the root with the maximum t value.
///
/// The matching RootRef is removed from `refs` (via swap_remove, O(1)) and the
/// data-vector index it stores is returned. Ties keep the first occurrence.
///
fn find_root_idx(refs: &mut Vec<RootRef>) -> usize {
    // Scan once, remembering position and value of the strict maximum.
    let (best_pos, best_t) = refs.iter().enumerate().fold(
        (0, 0.),
        |acc, (pos, e)| if e.m_t > acc.1 { (pos, e.m_t) } else { acc },
    );
    let winner = refs.swap_remove(best_pos);
    assert!(winner.m_t == best_t);
    winner.m_idx
}
///
/// From the given RootRef vector construct the subtree and update the corresponding root nodes in
/// the data vector.
///
/// The element with the maximum t value will be set as root with the corresponding split
/// dimension. The remaining elements will sorted by the split dimension. The split value is the
/// corresponding coordinate of item floor(|root_refs| / 2) and the elements are splitted into <=
/// and >.
///
/// From the <= elements the left subtree is constructed recursively with swapped split dimension.
/// Same for the > elements as the right subtree.
///
/// For the nodes in data that are referenced by RootRefs in root_refs the corresponding Roots are
/// updated accordingly.
///
///
/// From the given RootRef vector construct the subtree and update the corresponding root nodes in
/// the data vector.
///
/// The element with the maximum t value will be set as root with the corresponding split
/// dimension. The remaining elements will be sorted by the split dimension. The split value is
/// the corresponding coordinate of item floor(|root_refs| / 2) and the elements are split into
/// <= and >.
///
/// From the <= elements the left subtree is constructed recursively with swapped split dimension.
/// Same for the > elements as the right subtree.
///
/// For the nodes in data that are referenced by RootRefs in root_refs the corresponding Roots are
/// updated accordingly.
///
fn create_root(mut root_refs: Vec<RootRef>,
               mut data: &mut Vec<Root>,
               dim: &SplitDimension)
               -> Option<usize> {
    if root_refs.is_empty() {
        return None;
    }
    let size1 = root_refs.len();
    // The caller must supply a concrete split dimension.
    assert!(*dim != SplitDimension::UNDEF);
    let is_x = *dim == SplitDimension::X;
    // find the element with the maximum t value, remove the corresponding RootRef
    let root_idx = find_root_idx(&mut root_refs);
    // the sub dimension flips from X to Y or from Y to X
    let sub_dim = if is_x {
        SplitDimension::Y
    } else {
        SplitDimension::X
    };
    let mut split_value = f64::NAN;
    let mut left_child_idx: Option<usize> = None;
    let mut right_child_idx: Option<usize> = None;
    let order_asc = if is_x {
        RootRef::order_by_x
    } else {
        RootRef::order_by_y
    };
    if root_refs.len() == 1 {
        // A single remaining element always becomes the left child.
        split_value = if is_x {
            root_refs[0].m_x
        } else {
            root_refs[0].m_y
        };
        left_child_idx = create_root(root_refs, &mut data, &sub_dim);
    } else if root_refs.len() > 1 {
        root_refs.sort_by(order_asc);
        // take the x value of the median element as the new split value
        let mut median_idx = root_refs.len() / 2 - 1;
        split_value = if is_x {
            root_refs[median_idx].m_x
        } else {
            root_refs[median_idx].m_y
        };
        // ensure that the right children really have a value > m_split by
        // advancing past duplicates of the split coordinate
        if is_x {
            while median_idx < root_refs.len() && root_refs[median_idx].m_x == split_value {
                median_idx = median_idx + 1;
            }
        } else {
            while median_idx < root_refs.len() && root_refs[median_idx].m_y == split_value {
                median_idx = median_idx + 1;
            }
        }
        let size2 = root_refs.len();
        assert!(size1 == size2 + 1);
        // split the data at the median point:
        let last = root_refs.split_off(median_idx);
        assert!(size2 == root_refs.len() + last.len());
        left_child_idx = create_root(root_refs, &mut data, &sub_dim);
        right_child_idx = create_root(last, &mut data, &sub_dim);
    }
    // Write the computed topology back into the node picked as subtree root.
    let r = data.get_mut(root_idx)
        .expect("Trying to access element at not existing vector position");
    r.m_type = if is_x {
        SplitDimension::X
    } else {
        SplitDimension::Y
    };
    r.m_split = split_value;
    r.m_left_child = left_child_idx;
    r.m_right_child = right_child_idx;
    Some(root_idx)
}
#[test]
#[test]
// A freshly constructed Root caches the label's t value and starts as a leaf.
fn test_root_new() {
    let r = Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string()));
    assert!(r.m_t == 9.);
    assert!(*r.m_data.get_label() == "A".to_string());
    assert!(r.m_type == SplitDimension::UNDEF);
}
#[test]
// Builds a three-node tree: the highest-t label ("A") must become the x-split
// root at index 0, with the others as left/right children at indices 1 and 2.
fn test_pst_init() {
    let mut f: Vec<Root> = Vec::new();
    f.push(Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string())));
    f.push(Root::new(Label::new(2., 3., 8., 2, 1, 1.5, "B".to_string())));
    f.push(Root::new(Label::new(3., 4., 7., 3, 1, 1.5, "C".to_string())));
    let root = Root::init_pst3d(&mut f);
    let root_idx = root.unwrap();
    println!("{}", f[root_idx].to_string(0, &f));
    assert!(root_idx == 0);
    assert!(f[root_idx].m_type == SplitDimension::X);
    assert!(f[root_idx].m_left_child.is_some());
    assert!(f[root_idx].m_right_child.is_some());
    assert!(f[root_idx].m_left_child.unwrap() == 1);
    assert!(f[root_idx].m_right_child.unwrap() == 2);
}
| {
assert!(idx < data.len());
result = format!("{}\nl{}", result, data[idx].to_string(level + 1, &data));
} | conditional_block |
/*
The library provides a simple datastructure to access geolocated labels with an additional
elimination time t and a label size factor. The library provides method to query a set of
such labels with a bounding box and a minimum elimination time.
Copyright (C) {2017} {Filip Krumpe <[email protected]>}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use std::f64;
use std::cmp::Ordering;
use primitives::label::Label;
use primitives::bbox::BBox;
///
/// Represent the possible split dimensions of a tree node.
///
/// `X` and `Y` mark inner nodes split along the corresponding coordinate;
/// `UNDEF` marks a node that has not been linked into a tree yet (a leaf).
///
// Debug/Clone/Copy/Eq are derived in addition to PartialEq: the variants are
// unit-like, so the extra derives are free and make the type usable in
// assertions and diagnostics.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SplitDimension {
    X,
    Y,
    UNDEF,
}
///
/// The struct defines a tree node.
///
/// The tree nodes members are the labels t value, the label itself, the split type (X, Y or UNDEF
/// in case the node is a leaf node).
///
/// The split value indicates the maximum value of the left children in the corresponding
/// dimension. The split value is guaranteed to be less than the corresponding coordinate of the
/// right children.
///
/// Left and right child are some indices, if there is a left or right subtree and none otherwise.
pub struct Root {
    /// Elimination time of the stored label (copied from `m_data` at construction).
    m_t: f64,
    /// The label payload itself.
    m_data: Label,
    /// Split dimension of this node; UNDEF for a node not yet linked into a tree.
    m_type: SplitDimension,
    /// Maximum coordinate (in the split dimension) of the left subtree; NaN for leaves.
    m_split: f64,
    /// Index of the left child in the shared data vector, if any.
    m_left_child: Option<usize>,
    /// Index of the right child in the shared data vector, if any.
    m_right_child: Option<usize>,
}
impl Root {
///
/// Construct a new root from the given label
///
/// Note: The function only contains the given label. No subtrees of connenctions to other
/// tree nodes are constructed.
///
/// To construct a single tree from a forest of root nodes use the Root::init_pst3d(...)
/// function.
///
pub fn new(l: Label) -> Root {
Root {
m_t: l.get_t(),
m_data: l,
m_type: SplitDimension::UNDEF,
m_split: f64::NAN,
m_left_child: None,
m_right_child: None,
}
}
///
/// Initialize a single 3D PST out of a forest of root nodes and return the root node index.
///
/// The function will mutate the given root nodes and set the corresponding split type, split
/// value and left and right subtree indices.
///
/// The function returns the index of the root node in the data array.
///
pub fn init_pst3d(mut data: &mut Vec<Root>) -> Option<usize> {
let mut refs: Vec<RootRef> = Vec::with_capacity(data.len());
data.sort_by(|first, second| if first.m_t < second.m_t {
Ordering::Less
} else if first.m_t > second.m_t {
Ordering::Greater
} else {
Ordering::Equal
});
data.reverse();
for (idx, d) in data.iter().enumerate() {
refs.push(RootRef::new(d, idx));
}
let initial_dimension = SplitDimension::X;
create_root(refs, &mut data, &initial_dimension)
}
///
/// Get a vector of references to the elements in the 3d PST with t >= min_t and that are
/// contained in bbox.
///
pub fn | <'a>(&'a self, bbox: &BBox, min_t: f64, data: &'a Vec<Root>) -> Vec<&'a Label> {
let mut r: Vec<&Label> = Vec::new();
if self.m_t <= min_t {
return r;
}
if bbox.is_contained(&self.m_data) {
r.push(&self.m_data);
}
// append the left child if it exists and is cut by the bounding box
if let Some(idx) = self.m_left_child {
let append = match self.m_type {
SplitDimension::X => bbox.get_min_x() <= self.m_split,
SplitDimension::Y => bbox.get_min_y() <= self.m_split,
SplitDimension::UNDEF => false,
};
if append {
assert!(idx < data.len());
let mut res = data[idx].get(&bbox, min_t, &data);
r.append(&mut res);
}
}
// append the right child if it exists and is cut by the bounding box
if let Some(idx) = self.m_right_child {
let append = match self.m_type {
SplitDimension::X => bbox.get_max_x() > self.m_split,
SplitDimension::Y => bbox.get_max_y() > self.m_split,
SplitDimension::UNDEF => false,
};
if append {
assert!(idx < data.len());
let mut res = data[idx].get(&bbox, min_t, &data);
r.append(&mut res);
}
}
r
}
///
/// Get a human readable string representation of the tree rooted at self.
///
/// A such string will look like:
///
/// ```text
/// x-node (split: 4.5): Label [#1]: 'T1' at (1, 2) with prio 1,elim-t: 9 and label factor: \
/// 1.5
/// l y-node (split: 4.5): Label [#2]: 'T2' at (2, 3) with prio 1, elim-t: 8 and label \
/// factor: 1.5
/// l x-node (split: NaN): Label [#3]: 'T3' at (3, 4) with prio 1, elim-t: 7 and \
/// label factor: 1.5
/// ```
///
pub fn to_string(&self, level: i32, data: &Vec<Root>) -> String {
// prefix is level x p
let p = " ";
let mut prefix = String::new();
for _ in 0..level {
prefix = format!("{}{}", prefix, p);
}
let mut result = match self.m_type {
SplitDimension::X => {
format!("{}x-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
SplitDimension::Y => {
format!("{}y-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
SplitDimension::UNDEF => {
format!("{}leaf-node (split: {}): {}",
prefix,
self.m_split,
self.m_data.to_string())
}
};
// append the left subtree
if let Some(idx) = self.m_left_child {
assert!(idx < data.len());
result = format!("{}\nl{}", result, data[idx].to_string(level + 1, &data));
}
// append the right subtree
if let Some(idx) = self.m_right_child {
assert!(idx < data.len());
result = format!("{}\nr{}", result, data[idx].to_string(level + 1, &data));
}
result
}
}
///
/// The struct represents a reference to a root node and contains all the information required to
/// construct the 3D PST.
///
#[derive(Debug)]
struct RootRef {
    /// x coordinate of the referenced label.
    m_x: f64,
    /// y coordinate of the referenced label.
    m_y: f64,
    /// Elimination time of the referenced label.
    m_t: f64,
    /// Index of the referenced `Root` in the shared data vector.
    m_idx: usize,
}
impl RootRef {
    ///
    /// Initialize a new root ref for node `r`, remembering its position `idx`
    /// in the data vector.
    ///
    fn new(r: &Root, idx: usize) -> RootRef {
        RootRef {
            m_t: r.m_data.get_t(),
            m_x: r.m_data.get_x(),
            m_y: r.m_data.get_y(),
            m_idx: idx,
        }
    }
    ///
    /// Compare two Root refs with respect to the t value.
    ///
    /// Incomparable values (NaN) compare as Equal, matching the behaviour of
    /// the previous hand-rolled comparison chain.
    ///
    fn order_by_t(first: &Self, second: &Self) -> Ordering {
        first.m_t.partial_cmp(&second.m_t).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the x value (NaN compares Equal).
    ///
    fn order_by_x(first: &Self, second: &Self) -> Ordering {
        first.m_x.partial_cmp(&second.m_x).unwrap_or(Ordering::Equal)
    }
    ///
    /// Compare two Root refs with respect to the y value (NaN compares Equal).
    ///
    fn order_by_y(first: &Self, second: &Self) -> Ordering {
        first.m_y.partial_cmp(&second.m_y).unwrap_or(Ordering::Equal)
    }
}
///
/// In the RootRef vector find the root with the maximum t value, remove it
/// from `refs` and return its original index (`m_idx`) into the data vector.
///
/// # Panics
///
/// Panics if `refs` is empty.
///
fn find_root_idx(refs: &mut Vec<RootRef>) -> usize {
    assert!(!refs.is_empty(),
            "find_root_idx requires at least one RootRef");
    // Seed the maximum from the first element instead of a 0.0 sentinel so
    // the result is correct even if every t value is negative (the previous
    // version would then return index 0 and fail its own assertion).
    let mut max_t = refs[0].m_t;
    let mut max_idx = 0;
    for (idx, e) in refs.iter().enumerate().skip(1) {
        if e.m_t > max_t {
            max_t = e.m_t;
            max_idx = idx;
        }
    }
    // swap_remove is O(1); the order of the remaining refs does not matter,
    // they are re-sorted by the caller.
    refs.swap_remove(max_idx).m_idx
}
///
/// From the given RootRef vector construct the subtree and update the corresponding root nodes in
/// the data vector.
///
/// The element with the maximum t value will be set as root with the corresponding split
/// dimension. The remaining elements will sorted by the split dimension. The split value is the
/// corresponding coordinate of item floor(|root_refs| / 2) and the elements are splitted into <=
/// and >.
///
/// From the <= elements the left subtree is constructed recursively with swapped split dimension.
/// Same for the > elements as the right subtree.
///
/// For the nodes in data that are referenced by RootRefs in root_refs the corresponding Roots are
/// updated accordingly.
///
fn create_root(mut root_refs: Vec<RootRef>,
mut data: &mut Vec<Root>,
dim: &SplitDimension)
-> Option<usize> {
if root_refs.is_empty() {
return None;
}
let size1 = root_refs.len();
assert!(*dim!= SplitDimension::UNDEF);
let is_x = *dim == SplitDimension::X;
// find the element with the maximum t value, remove the corresonding RootRef
let root_idx = find_root_idx(&mut root_refs);
// the sub dimension flips from X to Y or from Y to X
let sub_dim = if is_x {
SplitDimension::Y
} else {
SplitDimension::X
};
let mut split_value = f64::NAN;
let mut left_child_idx: Option<usize> = None;
let mut right_child_idx: Option<usize> = None;
let order_asc = if is_x {
RootRef::order_by_x
} else {
RootRef::order_by_y
};
if root_refs.len() == 1 {
split_value = if is_x {
root_refs[0].m_x
} else {
root_refs[0].m_y
};
left_child_idx = create_root(root_refs, &mut data, &sub_dim);
} else if root_refs.len() > 1 {
root_refs.sort_by(order_asc);
// take the x value of the median element as the new split value
let mut median_idx = root_refs.len() / 2 - 1;
split_value = if is_x {
root_refs[median_idx].m_x
} else {
root_refs[median_idx].m_y
};
// ensure that the right children realy have a value > m_split
if is_x {
while median_idx < root_refs.len() && root_refs[median_idx].m_x == split_value {
median_idx = median_idx + 1;
}
} else {
while median_idx < root_refs.len() && root_refs[median_idx].m_y == split_value {
median_idx = median_idx + 1;
}
}
let size2 = root_refs.len();
assert!(size1 == size2 + 1);
// split the data at the median point:
let last = root_refs.split_off(median_idx);
assert!(size2 == root_refs.len() + last.len());
left_child_idx = create_root(root_refs, &mut data, &sub_dim);
right_child_idx = create_root(last, &mut data, &sub_dim);
}
let r = data.get_mut(root_idx)
.expect("Trying to access element at not existing vector position");
r.m_type = if is_x {
SplitDimension::X
} else {
SplitDimension::Y
};
r.m_split = split_value;
r.m_left_child = left_child_idx;
r.m_right_child = right_child_idx;
Some(root_idx)
}
#[test]
fn test_root_new() {
    // A label at (1, 2) with elimination time 9 and size factor 1.5.
    let node = Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string()));
    assert_eq!(node.m_t, 9.);
    assert_eq!(*node.m_data.get_label(), "A".to_string());
    // A freshly constructed node is a leaf: no split dimension assigned yet.
    assert!(node.m_type == SplitDimension::UNDEF);
}
#[test]
fn test_pst_init() {
    // Three labels with strictly decreasing elimination times 9 > 8 > 7.
    let mut forest: Vec<Root> = vec![
        Root::new(Label::new(1., 2., 9., 1, 1, 1.5, "A".to_string())),
        Root::new(Label::new(2., 3., 8., 2, 1, 1.5, "B".to_string())),
        Root::new(Label::new(3., 4., 7., 3, 1, 1.5, "C".to_string())),
    ];
    let root_idx = Root::init_pst3d(&mut forest).unwrap();
    println!("{}", forest[root_idx].to_string(0, &forest));
    // The highest-t label ("A") becomes the root, split along x first,
    // with "B" as left and "C" as right child.
    assert_eq!(root_idx, 0);
    assert!(forest[root_idx].m_type == SplitDimension::X);
    assert_eq!(forest[root_idx].m_left_child, Some(1));
    assert_eq!(forest[root_idx].m_right_child, Some(2));
}
| get | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.