file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
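Each row below is one fill-in-the-middle (FIM) example: `prefix` and `suffix` are the code surrounding a masked span, `middle` is the masked span itself, and `fim_type` records how the span was chosen (`random_line_split`, `identifier_body`, `conditional_block`, or `identifier_name`). As a minimal sketch, assuming the usual FIM convention that the original file is simply `prefix + middle + suffix`, a row can be modelled and reassembled like this (the struct and method names are illustrative, not part of the dataset):

```rust
/// Illustrative row type; the field names mirror the dataset columns above.
struct FimExample {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    /// One of: random_line_split, identifier_body, conditional_block, identifier_name.
    fim_type: String,
}

impl FimExample {
    /// Reassemble the original source file, assuming the file is the plain
    /// concatenation prefix + middle + suffix.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}
```

For the first `main.rs` row below, `reassemble` would splice the masked `and_then` closure back between its prefix and suffix, yielding the complete file.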
main.rs
|
extern crate md5;
use std::io;
use std::io::Read;
use md5::Digest;
struct StopWhenIterator<I, P> {
parent: I,
predicate: P,
completed: bool
}
trait StopWhen<P: FnMut(&Self::Item) -> bool> : Iterator
where Self:Sized {
/// Iterates over items until the given predicate evaluates to true.
/// Once the predicate evaluates to true, the iterator yields the current item and completes.
fn stop_when(self, predicate: P) -> StopWhenIterator<Self, P> {
StopWhenIterator {parent: self, predicate: predicate, completed: false}
}
}
impl<I, P> StopWhen<P> for I
where P: FnMut(&I::Item) -> bool,
I : Iterator { }
impl<I, P> Iterator for StopWhenIterator<I, P>
where P: FnMut(&I::Item) -> bool,
I: Iterator {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
if self.completed {
None
} else {
|
}
Some(x)
})
}
}
}
fn read_input() -> io::Result<String> {
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string())
}
fn starts_with_five_zeros(digest: &Digest) -> bool {
digest[0] == 0 && digest[1] == 0 && digest[2] < 16
}
fn sixth_item_is_also_zero(digest: &Digest) -> bool {
digest[2] == 0
}
fn main() {
let input = read_input().unwrap();
let start_with_zero: Vec<_> = (0..i64::max_value())
.map(|i| (i, input.clone() + &i.to_string()))
.map(|(i, s)| (i, md5::compute(s.as_bytes())))
.filter(|&(_, digest)| starts_with_five_zeros(&digest))
.stop_when(|&(_, digest)| sixth_item_is_also_zero(&digest))
.map(|(i, _)| i)
.collect();
println!("Smallest with 5 zeroes: {}", start_with_zero[0]);
println!("Smallest with 6 zeroes: {}", start_with_zero[start_with_zero.len() - 1]);
}
|
self.parent.next().and_then(|x| {
if (self.predicate)(&x) {
self.completed = true;
|
random_line_split
|
main.rs
|
extern crate md5;
use std::io;
use std::io::Read;
use md5::Digest;
struct StopWhenIterator<I, P> {
parent: I,
predicate: P,
completed: bool
}
trait StopWhen<P: FnMut(&Self::Item) -> bool> : Iterator
where Self:Sized {
/// Iterates over items until the given predicate evaluates to true.
/// Once the predicate evaluates to true, the iterator yields the current item and completes.
fn stop_when(self, predicate: P) -> StopWhenIterator<Self, P> {
StopWhenIterator {parent: self, predicate: predicate, completed: false}
}
}
impl<I, P> StopWhen<P> for I
where P: FnMut(&I::Item) -> bool,
I : Iterator { }
impl<I, P> Iterator for StopWhenIterator<I, P>
where P: FnMut(&I::Item) -> bool,
I: Iterator {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
if self.completed {
None
} else {
self.parent.next().and_then(|x| {
if (self.predicate)(&x) {
self.completed = true;
}
Some(x)
})
}
}
}
fn read_input() -> io::Result<String>
|
fn starts_with_five_zeros(digest: &Digest) -> bool {
digest[0] == 0 && digest[1] == 0 && digest[2] < 16
}
fn sixth_item_is_also_zero(digest: &Digest) -> bool {
digest[2] == 0
}
fn main() {
let input = read_input().unwrap();
let start_with_zero: Vec<_> = (0..i64::max_value())
.map(|i| (i, input.clone() + &i.to_string()))
.map(|(i, s)| (i, md5::compute(s.as_bytes())))
.filter(|&(_, digest)| starts_with_five_zeros(&digest))
.stop_when(|&(_, digest)| sixth_item_is_also_zero(&digest))
.map(|(i, _)| i)
.collect();
println!("Smallest with 5 zeroes: {}", start_with_zero[0]);
println!("Smallest with 6 zeroes: {}", start_with_zero[start_with_zero.len() - 1]);
}
|
{
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string())
}
|
identifier_body
|
main.rs
|
extern crate md5;
use std::io;
use std::io::Read;
use md5::Digest;
struct StopWhenIterator<I, P> {
parent: I,
predicate: P,
completed: bool
}
trait StopWhen<P: FnMut(&Self::Item) -> bool> : Iterator
where Self:Sized {
/// Iterates over items until the given predicate evaluates to true.
/// Once the predicate evaluates to true, the iterator yields the current item and completes.
fn stop_when(self, predicate: P) -> StopWhenIterator<Self, P> {
StopWhenIterator {parent: self, predicate: predicate, completed: false}
}
}
impl<I, P> StopWhen<P> for I
where P: FnMut(&I::Item) -> bool,
I : Iterator { }
impl<I, P> Iterator for StopWhenIterator<I, P>
where P: FnMut(&I::Item) -> bool,
I: Iterator {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
if self.completed
|
else {
self.parent.next().and_then(|x| {
if (self.predicate)(&x) {
self.completed = true;
}
Some(x)
})
}
}
}
fn read_input() -> io::Result<String> {
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string())
}
fn starts_with_five_zeros(digest: &Digest) -> bool {
digest[0] == 0 && digest[1] == 0 && digest[2] < 16
}
fn sixth_item_is_also_zero(digest: &Digest) -> bool {
digest[2] == 0
}
fn main() {
let input = read_input().unwrap();
let start_with_zero: Vec<_> = (0..i64::max_value())
.map(|i| (i, input.clone() + &i.to_string()))
.map(|(i, s)| (i, md5::compute(s.as_bytes())))
.filter(|&(_, digest)| starts_with_five_zeros(&digest))
.stop_when(|&(_, digest)| sixth_item_is_also_zero(&digest))
.map(|(i, _)| i)
.collect();
println!("Smallest with 5 zeroes: {}", start_with_zero[0]);
println!("Smallest with 6 zeroes: {}", start_with_zero[start_with_zero.len() - 1]);
}
|
{
None
}
|
conditional_block
|
main.rs
|
extern crate md5;
use std::io;
use std::io::Read;
use md5::Digest;
struct StopWhenIterator<I, P> {
parent: I,
predicate: P,
completed: bool
}
trait StopWhen<P: FnMut(&Self::Item) -> bool> : Iterator
where Self:Sized {
/// Iterates over items until the given predicate evaluates to true.
/// Once the predicate evaluates to true, the iterator yields the current item and completes.
fn stop_when(self, predicate: P) -> StopWhenIterator<Self, P> {
StopWhenIterator {parent: self, predicate: predicate, completed: false}
}
}
impl<I, P> StopWhen<P> for I
where P: FnMut(&I::Item) -> bool,
I : Iterator { }
impl<I, P> Iterator for StopWhenIterator<I, P>
where P: FnMut(&I::Item) -> bool,
I: Iterator {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
if self.completed {
None
} else {
self.parent.next().and_then(|x| {
if (self.predicate)(&x) {
self.completed = true;
}
Some(x)
})
}
}
}
fn read_input() -> io::Result<String> {
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string())
}
fn
|
(digest: &Digest) -> bool {
digest[0] == 0 && digest[1] == 0 && digest[2] < 16
}
fn sixth_item_is_also_zero(digest: &Digest) -> bool {
digest[2] == 0
}
fn main() {
let input = read_input().unwrap();
let start_with_zero: Vec<_> = (0..i64::max_value())
.map(|i| (i, input.clone() + &i.to_string()))
.map(|(i, s)| (i, md5::compute(s.as_bytes())))
.filter(|&(_, digest)| starts_with_five_zeros(&digest))
.stop_when(|&(_, digest)| sixth_item_is_also_zero(&digest))
.map(|(i, _)| i)
.collect();
println!("Smallest with 5 zeroes: {}", start_with_zero[0]);
println!("Smallest with 6 zeroes: {}", start_with_zero[start_with_zero.len() - 1]);
}
|
starts_with_five_zeros
|
identifier_name
|
test.rs
|
use super::FirstSets;
use grammar::repr::*;
use lr1::lookahead::Token::EOF;
use lr1::lookahead::{Token, TokenSet};
use lr1::tls::Lr1Tls;
use string_cache::DefaultAtom as Atom;
use test_util::normalized_grammar;
pub fn nt(t: &str) -> Symbol {
Symbol::Nonterminal(NonterminalString(Atom::from(t)))
}
pub fn term(t: &str) -> Symbol
|
fn la(t: &str) -> Token {
Token::Terminal(TerminalString::quoted(Atom::from(t)))
}
fn first0(first: &FirstSets, symbols: &[Symbol]) -> Vec<Token> {
let v = first.first0(symbols);
v.iter().collect()
}
fn first1(first: &FirstSets, symbols: &[Symbol], lookahead: Token) -> Vec<Token> {
let v = first.first1(symbols, &TokenSet::from(lookahead));
v.iter().collect()
}
#[test]
fn basic_first1() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first1(&first_sets, &[nt("A")], EOF), vec![la("C"), la("D")]);
assert_eq!(first1(&first_sets, &[nt("B")], EOF), vec![la("D"), EOF]);
assert_eq!(
first1(&first_sets, &[nt("B"), term("E")], EOF),
vec![la("D"), la("E")]
);
assert_eq!(
first1(&first_sets, &[nt("B"), nt("X")], EOF),
vec![la("D"), la("E")]
);
}
#[test]
fn basic_first0() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first0(&first_sets, &[nt("A")]), vec![la("C"), la("D")]);
assert_eq!(first0(&first_sets, &[nt("B")]), vec![la("D"), EOF]);
assert_eq!(
first0(&first_sets, &[nt("B"), term("E")]),
vec![la("D"), la("E")]
);
assert_eq!(
first0(&first_sets, &[nt("B"), nt("X")]),
vec![la("D"), la("E")]
);
assert_eq!(first0(&first_sets, &[nt("X")]), vec![la("E")]);
}
|
{
Symbol::Terminal(TerminalString::quoted(Atom::from(t)))
}
|
identifier_body
|
test.rs
|
use super::FirstSets;
use grammar::repr::*;
use lr1::lookahead::Token::EOF;
use lr1::lookahead::{Token, TokenSet};
use lr1::tls::Lr1Tls;
use string_cache::DefaultAtom as Atom;
|
pub fn term(t: &str) -> Symbol {
Symbol::Terminal(TerminalString::quoted(Atom::from(t)))
}
fn la(t: &str) -> Token {
Token::Terminal(TerminalString::quoted(Atom::from(t)))
}
fn first0(first: &FirstSets, symbols: &[Symbol]) -> Vec<Token> {
let v = first.first0(symbols);
v.iter().collect()
}
fn first1(first: &FirstSets, symbols: &[Symbol], lookahead: Token) -> Vec<Token> {
let v = first.first1(symbols, &TokenSet::from(lookahead));
v.iter().collect()
}
#[test]
fn basic_first1() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first1(&first_sets, &[nt("A")], EOF), vec![la("C"), la("D")]);
assert_eq!(first1(&first_sets, &[nt("B")], EOF), vec![la("D"), EOF]);
assert_eq!(
first1(&first_sets, &[nt("B"), term("E")], EOF),
vec![la("D"), la("E")]
);
assert_eq!(
first1(&first_sets, &[nt("B"), nt("X")], EOF),
vec![la("D"), la("E")]
);
}
#[test]
fn basic_first0() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first0(&first_sets, &[nt("A")]), vec![la("C"), la("D")]);
assert_eq!(first0(&first_sets, &[nt("B")]), vec![la("D"), EOF]);
assert_eq!(
first0(&first_sets, &[nt("B"), term("E")]),
vec![la("D"), la("E")]
);
assert_eq!(
first0(&first_sets, &[nt("B"), nt("X")]),
vec![la("D"), la("E")]
);
assert_eq!(first0(&first_sets, &[nt("X")]), vec![la("E")]);
}
|
use test_util::normalized_grammar;
pub fn nt(t: &str) -> Symbol {
Symbol::Nonterminal(NonterminalString(Atom::from(t)))
}
|
random_line_split
|
test.rs
|
use super::FirstSets;
use grammar::repr::*;
use lr1::lookahead::Token::EOF;
use lr1::lookahead::{Token, TokenSet};
use lr1::tls::Lr1Tls;
use string_cache::DefaultAtom as Atom;
use test_util::normalized_grammar;
pub fn nt(t: &str) -> Symbol {
Symbol::Nonterminal(NonterminalString(Atom::from(t)))
}
pub fn term(t: &str) -> Symbol {
Symbol::Terminal(TerminalString::quoted(Atom::from(t)))
}
fn
|
(t: &str) -> Token {
Token::Terminal(TerminalString::quoted(Atom::from(t)))
}
fn first0(first: &FirstSets, symbols: &[Symbol]) -> Vec<Token> {
let v = first.first0(symbols);
v.iter().collect()
}
fn first1(first: &FirstSets, symbols: &[Symbol], lookahead: Token) -> Vec<Token> {
let v = first.first1(symbols, &TokenSet::from(lookahead));
v.iter().collect()
}
#[test]
fn basic_first1() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first1(&first_sets, &[nt("A")], EOF), vec![la("C"), la("D")]);
assert_eq!(first1(&first_sets, &[nt("B")], EOF), vec![la("D"), EOF]);
assert_eq!(
first1(&first_sets, &[nt("B"), term("E")], EOF),
vec![la("D"), la("E")]
);
assert_eq!(
first1(&first_sets, &[nt("B"), nt("X")], EOF),
vec![la("D"), la("E")]
);
}
#[test]
fn basic_first0() {
let grammar = normalized_grammar(
r#"
grammar;
A = B "C";
B: Option<u32> = {
"D" => Some(1),
=> None
};
X = "E"; // intentionally unreachable
"#,
);
let _lr1_tls = Lr1Tls::install(grammar.terminals.clone());
let first_sets = FirstSets::new(&grammar);
assert_eq!(first0(&first_sets, &[nt("A")]), vec![la("C"), la("D")]);
assert_eq!(first0(&first_sets, &[nt("B")]), vec![la("D"), EOF]);
assert_eq!(
first0(&first_sets, &[nt("B"), term("E")]),
vec![la("D"), la("E")]
);
assert_eq!(
first0(&first_sets, &[nt("B"), nt("X")]),
vec![la("D"), la("E")]
);
assert_eq!(first0(&first_sets, &[nt("X")]), vec![la("E")]);
}
|
la
|
identifier_name
|
ordered_collection.rs
|
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap};
use std::collections::btree_map;
pub trait OrderedCollection<K: Clone + Ord, V> {
fn insert_unique(&mut self, k: K, v: V) {
self.update_value(k, move|v_opt| match v_opt {
Some(_) => panic!("Key already exists."),
None => v,
});
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool;
fn update_value<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V;
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)>;
}
impl <K: Clone + Ord, V> OrderedCollection<K, V> for BTreeMap<K, V> {
fn
|
<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V {
match self.entry(k) {
btree_map::Entry::Occupied(mut entry) => {
let new_v = f(Some(entry.get()));
entry.insert(new_v);
},
btree_map::Entry::Vacant(space) => {
space.insert(f(None));
}
}
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool
{
let k_opt = self.find_min().and_then(|(k, v)| if ready(k, v) { Some(k.clone()) } else { None });
k_opt.map(|k| { let v = self.remove(&k).unwrap();
(k, v) })
}
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)> {
self.iter().next()
}
}
|
update_value
|
identifier_name
|
ordered_collection.rs
|
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap};
use std::collections::btree_map;
pub trait OrderedCollection<K: Clone + Ord, V> {
fn insert_unique(&mut self, k: K, v: V) {
self.update_value(k, move|v_opt| match v_opt {
Some(_) => panic!("Key already exists."),
None => v,
});
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool;
fn update_value<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V;
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)>;
}
impl <K: Clone + Ord, V> OrderedCollection<K, V> for BTreeMap<K, V> {
fn update_value<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V {
match self.entry(k) {
btree_map::Entry::Occupied(mut entry) => {
let new_v = f(Some(entry.get()));
entry.insert(new_v);
},
btree_map::Entry::Vacant(space) => {
space.insert(f(None));
}
}
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool
{
let k_opt = self.find_min().and_then(|(k, v)| if ready(k, v) { Some(k.clone()) } else { None });
k_opt.map(|k| { let v = self.remove(&k).unwrap();
(k, v) })
}
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)>
|
}
|
{
self.iter().next()
}
|
identifier_body
|
ordered_collection.rs
|
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::{BTreeMap};
use std::collections::btree_map;
pub trait OrderedCollection<K: Clone + Ord, V> {
fn insert_unique(&mut self, k: K, v: V) {
self.update_value(k, move|v_opt| match v_opt {
Some(_) => panic!("Key already exists."),
None => v,
});
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool;
fn update_value<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V;
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)>;
|
match self.entry(k) {
btree_map::Entry::Occupied(mut entry) => {
let new_v = f(Some(entry.get()));
entry.insert(new_v);
},
btree_map::Entry::Vacant(space) => {
space.insert(f(None));
}
}
}
fn pop_min_when<F>(&mut self, ready: F) -> Option<(K, V)>
where F: Fn(&K, &V) -> bool
{
let k_opt = self.find_min().and_then(|(k, v)| if ready(k, v) { Some(k.clone()) } else { None });
k_opt.map(|k| { let v = self.remove(&k).unwrap();
(k, v) })
}
fn find_min<'a>(&'a self) -> Option<(&'a K, &'a V)> {
self.iter().next()
}
}
|
}
impl <K: Clone + Ord, V> OrderedCollection<K, V> for BTreeMap<K, V> {
fn update_value<F>(&mut self, k: K, f: F) where F: FnOnce(Option<&V>) -> V {
|
random_line_split
|
lulzvm.rs
|
extern crate ctrlc;
extern crate lulzvm;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate clap;
use clap::{ArgGroup, ArgMatches, App};
|
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read, Result};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
fn main() {
let matches = App::new("LulzVM")
.args_from_usage("[FILE] 'Bytecode executable'
-d, --debug 'Enable debug messages'")
.group(ArgGroup::with_name("required")
.args(&["FILE"])
.required(true))
.get_matches();
match do_checked_main(matches) {
Ok(_) => (),
Err(e) => println!("Error: {:?}", e),
}
}
fn do_checked_main(matches: ArgMatches) -> Result<()> {
let executable_filename = matches.value_of("FILE").unwrap();
let mut executable = Vec::new();
let mut executable_file = try!(File::open(executable_filename));
let _ = try!(executable_file.read_to_end(&mut executable));
if matches.is_present("debug") {
env::set_var("RUST_LOG", "lulzvm::vm=debug,error,info,warn,trace");
let _ = env_logger::init().unwrap();
}
let termination_scheduled = Arc::new(AtomicBool::new(false));
let r = termination_scheduled.clone();
ctrlc::set_handler(move || {
info!("Terminating...");
r.store(true, Ordering::Relaxed);
});
let mut vm = VM::new(stdin(), stdout(), executable, termination_scheduled);
vm.run();
Ok(())
}
|
use lulzvm::vm::VM;
|
random_line_split
|
lulzvm.rs
|
extern crate ctrlc;
extern crate lulzvm;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate clap;
use clap::{ArgGroup, ArgMatches, App};
use lulzvm::vm::VM;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read, Result};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
fn main()
|
fn do_checked_main(matches: ArgMatches) -> Result<()> {
let executable_filename = matches.value_of("FILE").unwrap();
let mut executable = Vec::new();
let mut executable_file = try!(File::open(executable_filename));
let _ = try!(executable_file.read_to_end(&mut executable));
if matches.is_present("debug") {
env::set_var("RUST_LOG", "lulzvm::vm=debug,error,info,warn,trace");
let _ = env_logger::init().unwrap();
}
let termination_scheduled = Arc::new(AtomicBool::new(false));
let r = termination_scheduled.clone();
ctrlc::set_handler(move || {
info!("Terminating...");
r.store(true, Ordering::Relaxed);
});
let mut vm = VM::new(stdin(), stdout(), executable, termination_scheduled);
vm.run();
Ok(())
}
|
{
let matches = App::new("LulzVM")
.args_from_usage("[FILE] 'Bytecode executable'
-d, --debug 'Enable debug messages'")
.group(ArgGroup::with_name("required")
.args(&["FILE"])
.required(true))
.get_matches();
match do_checked_main(matches) {
Ok(_) => (),
Err(e) => println!("Error: {:?}", e),
}
}
|
identifier_body
|
lulzvm.rs
|
extern crate ctrlc;
extern crate lulzvm;
#[macro_use]
extern crate log;
extern crate env_logger;
#[macro_use]
extern crate clap;
use clap::{ArgGroup, ArgMatches, App};
use lulzvm::vm::VM;
use std::env;
use std::fs::File;
use std::io::{stdin, stdout, Read, Result};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
fn
|
() {
let matches = App::new("LulzVM")
.args_from_usage("[FILE] 'Bytecode executable'
-d, --debug 'Enable debug messages'")
.group(ArgGroup::with_name("required")
.args(&["FILE"])
.required(true))
.get_matches();
match do_checked_main(matches) {
Ok(_) => (),
Err(e) => println!("Error: {:?}", e),
}
}
fn do_checked_main(matches: ArgMatches) -> Result<()> {
let executable_filename = matches.value_of("FILE").unwrap();
let mut executable = Vec::new();
let mut executable_file = try!(File::open(executable_filename));
let _ = try!(executable_file.read_to_end(&mut executable));
if matches.is_present("debug") {
env::set_var("RUST_LOG", "lulzvm::vm=debug,error,info,warn,trace");
let _ = env_logger::init().unwrap();
}
let termination_scheduled = Arc::new(AtomicBool::new(false));
let r = termination_scheduled.clone();
ctrlc::set_handler(move || {
info!("Terminating...");
r.store(true, Ordering::Relaxed);
});
let mut vm = VM::new(stdin(), stdout(), executable, termination_scheduled);
vm.run();
Ok(())
}
|
main
|
identifier_name
|
value.rs
|
use std::fmt::{Debug, Display, Formatter, Error};
use std::fmt::Result as FmtResult;
use num::Float;
use std::mem;
use super::clip::{ClipHolder};
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum Value {
Int(i64),
Float(FloatWrap),
Bool(bool),
String(String),
Tuple(Vec<Value>),
Clip(ClipHolder),
Nil
}
impl Display for Value {
fn fmt<'r>(&'r self, formatter: &mut Formatter) -> FmtResult {
match self {
&Value::Int(i) => write!(formatter, "{}", i),
&Value::Float(ref f) => write!(formatter, "{}", f),
&Value::Bool(b) => write!(formatter, "{}", b),
&Value::String(ref s) => write!(formatter, "{}", s),
&Value::Tuple(ref v) => {
match write!(formatter, "(") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
let len = v.len();
for (idx, val) in v.iter().enumerate() {
match write!(formatter, "{}", val) {
Ok(()) => (),
Err(e) => {return Err(e);}
}
if idx != len - 1 {
match write!(formatter, ", ") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
}
}
match write!(formatter, ")") {
Ok(()) => Ok(()),
Err(e) => Err(e)
}
}
&Value::Clip(_) => write!(formatter, "<Clip>"),
&Value::Nil => write!(formatter, "nil"),
}
}
}
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct FloatWrap(u64);
impl FloatWrap {
pub fn new(mut val: f64) -> FloatWrap {
// make all NaNs have the same representation
if val.is_nan() {
val = Float::nan()
}
unsafe {
FloatWrap(mem::transmute(val))
}
}
pub fn get(&self) -> f64 {
let cl = self.clone();
unsafe {
mem::transmute(cl)
}
}
}
impl Debug for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error>
|
}
impl Display for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}", self.get())
}
}
|
{
write!(f, "{:?}", self.get())
}
|
identifier_body
|
value.rs
|
use std::fmt::{Debug, Display, Formatter, Error};
use std::fmt::Result as FmtResult;
use num::Float;
use std::mem;
use super::clip::{ClipHolder};
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum
|
{
Int(i64),
Float(FloatWrap),
Bool(bool),
String(String),
Tuple(Vec<Value>),
Clip(ClipHolder),
Nil
}
impl Display for Value {
fn fmt<'r>(&'r self, formatter: &mut Formatter) -> FmtResult {
match self {
&Value::Int(i) => write!(formatter, "{}", i),
&Value::Float(ref f) => write!(formatter, "{}", f),
&Value::Bool(b) => write!(formatter, "{}", b),
&Value::String(ref s) => write!(formatter, "{}", s),
&Value::Tuple(ref v) => {
match write!(formatter, "(") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
let len = v.len();
for (idx, val) in v.iter().enumerate() {
match write!(formatter, "{}", val) {
Ok(()) => (),
Err(e) => {return Err(e);}
}
if idx != len - 1 {
match write!(formatter, ", ") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
}
}
match write!(formatter, ")") {
Ok(()) => Ok(()),
Err(e) => Err(e)
}
}
&Value::Clip(_) => write!(formatter, "<Clip>"),
&Value::Nil => write!(formatter, "nil"),
}
}
}
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct FloatWrap(u64);
impl FloatWrap {
pub fn new(mut val: f64) -> FloatWrap {
// make all NaNs have the same representation
if val.is_nan() {
val = Float::nan()
}
unsafe {
FloatWrap(mem::transmute(val))
}
}
pub fn get(&self) -> f64 {
let cl = self.clone();
unsafe {
mem::transmute(cl)
}
}
}
impl Debug for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:?}", self.get())
}
}
impl Display for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}", self.get())
}
}
|
Value
|
identifier_name
|
value.rs
|
use std::fmt::{Debug, Display, Formatter, Error};
use std::fmt::Result as FmtResult;
use num::Float;
use std::mem;
use super::clip::{ClipHolder};
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum Value {
Int(i64),
Float(FloatWrap),
Bool(bool),
String(String),
Tuple(Vec<Value>),
Clip(ClipHolder),
Nil
}
impl Display for Value {
fn fmt<'r>(&'r self, formatter: &mut Formatter) -> FmtResult {
match self {
&Value::Int(i) => write!(formatter, "{}", i),
&Value::Float(ref f) => write!(formatter, "{}", f),
&Value::Bool(b) => write!(formatter, "{}", b),
&Value::String(ref s) => write!(formatter, "{}", s),
&Value::Tuple(ref v) => {
match write!(formatter, "(") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
let len = v.len();
for (idx, val) in v.iter().enumerate() {
match write!(formatter, "{}", val) {
Ok(()) => (),
|
}
if idx != len - 1 {
match write!(formatter, ", ") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
}
}
match write!(formatter, ")") {
Ok(()) => Ok(()),
Err(e) => Err(e)
}
}
&Value::Clip(_) => write!(formatter, "<Clip>"),
&Value::Nil => write!(formatter, "nil"),
}
}
}
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct FloatWrap(u64);
impl FloatWrap {
pub fn new(mut val: f64) -> FloatWrap {
// make all NaNs have the same representation
if val.is_nan() {
val = Float::nan()
}
unsafe {
FloatWrap(mem::transmute(val))
}
}
pub fn get(&self) -> f64 {
let cl = self.clone();
unsafe {
mem::transmute(cl)
}
}
}
impl Debug for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:?}", self.get())
}
}
impl Display for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}", self.get())
}
}
|
Err(e) => {return Err(e);}
|
random_line_split
|
quadratic.rs
|
// Copyright (c) 2015, Mikhail Vorotilov
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use super::super::FloatType;
use super::super::Roots;
/// Solves a quadratic equation a2*x^2 + a1*x + a0 = 0.
///
/// In case two roots are present, the first returned root is less than the second one.
///
/// # Examples
///
/// ```
/// use roots::Roots;
/// use roots::find_roots_quadratic;
///
/// let no_roots = find_roots_quadratic(1f32, 0f32, 1f32);
/// // Returns Roots::No([]) as 'x^2 + 1 = 0' has no roots
///
/// let one_root = find_roots_quadratic(1f64, 0f64, 0f64);
/// // Returns Roots::One([0f64]) as 'x^2 = 0' has one root 0
///
/// let two_roots = find_roots_quadratic(1f32, 0f32, -1f32);
/// // Returns Roots::Two([-1f32,1f32]) as 'x^2 - 1 = 0' has roots -1 and 1
/// ```
pub fn find_roots_quadratic<F: FloatType>(a2: F, a1: F, a0: F) -> Roots<F> {
// Handle non-standard cases
if a2 == F::zero() {
// a2 = 0; a1*x+a0=0; solve linear equation
super::linear::find_roots_linear(a1, a0)
} else {
let _2 = F::from(2i16);
let _4 = F::from(4i16);
// Rust lacks a simple way to convert an integer constant to generic type F
let discriminant = a1 * a1 - _4 * a2 * a0;
if discriminant < F::zero() {
Roots::No([])
} else {
let a2x2 = _2 * a2;
if discriminant == F::zero() {
Roots::One([-a1 / a2x2])
} else {
// To improve precision, do not use the smallest divisor.
// See https://people.csail.mit.edu/bkph/articles/Quadratics.pdf
let sq = discriminant.sqrt();
let (same_sign, diff_sign) = if a1 < F::zero() {
(-a1 + sq, -a1 - sq)
} else {
(-a1 - sq, -a1 + sq)
};
let (x1, x2) = if same_sign.abs() > a2x2.abs() {
let a0x2 = _2 * a0;
if diff_sign.abs() > a2x2.abs() {
// 2*a2 is the smallest divisor, do not use it
(a0x2 / same_sign, a0x2 / diff_sign)
} else {
// diff_sign is the smallest divisor, do not use it
(a0x2 / same_sign, same_sign / a2x2)
}
} else {
// 2*a2 is the greatest divisor, use it
(diff_sign / a2x2, same_sign / a2x2)
};
// Order roots
if x1 < x2 {
Roots::Two([x1, x2])
} else {
Roots::Two([x2, x1])
}
}
}
}
}
#[cfg(test)]
mod test {
use super::super::super::*;
#[test]
fn test_find_roots_quadratic() {
assert_eq!(find_roots_quadratic(0f32, 0f32, 0f32), Roots::One([0f32]));
assert_eq!(find_roots_quadratic(1f32, 0f32, 1f32), Roots::No([]));
assert_eq!(find_roots_quadratic(1f64, 0f64, -1f64), Roots::Two([-1f64, 1f64]));
}
#[test]
fn test_find_roots_quadratic_small_a2() {
assert_eq!(
find_roots_quadratic(1e-20f32, -1f32, -1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(
find_roots_quadratic(-1e-20f32, 1f32, 1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(find_roots_quadratic(1e-20f32, -1f32, 1f32), Roots::Two([1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, 1f32), Roots::Two([-1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, -1f32), Roots::Two([1f32, 1e20f32]));
}
#[test]
fn
|
() {
assert_eq!(find_roots_quadratic(1f32, -1e15f32, -1f32), Roots::Two([-1e-15f32, 1e15f32]));
assert_eq!(find_roots_quadratic(-1f32, 1e15f32, 1f32), Roots::Two([-1e-15f32, 1e15f32]));
}
}
|
test_find_roots_quadratic_big_a1
|
identifier_name
|
quadratic.rs
|
// Copyright (c) 2015, Mikhail Vorotilov
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use super::super::FloatType;
use super::super::Roots;
/// Solves a quadratic equation a2*x^2 + a1*x + a0 = 0.
///
/// In case two roots are present, the first returned root is less than the second one.
///
/// # Examples
///
/// ```
/// use roots::Roots;
/// use roots::find_roots_quadratic;
///
/// let no_roots = find_roots_quadratic(1f32, 0f32, 1f32);
/// // Returns Roots::No([]) as 'x^2 + 1 = 0' has no roots
///
/// let one_root = find_roots_quadratic(1f64, 0f64, 0f64);
/// // Returns Roots::One([0f64]) as 'x^2 = 0' has one root 0
///
/// let two_roots = find_roots_quadratic(1f32, 0f32, -1f32);
/// // Returns Roots::Two([-1f32,1f32]) as 'x^2 - 1 = 0' has roots -1 and 1
/// ```
pub fn find_roots_quadratic<F: FloatType>(a2: F, a1: F, a0: F) -> Roots<F> {
// Handle non-standard cases
if a2 == F::zero() {
// a2 = 0; a1*x+a0=0; solve linear equation
super::linear::find_roots_linear(a1, a0)
} else {
let _2 = F::from(2i16);
let _4 = F::from(4i16);
// Rust lacks a simple way to convert an integer constant to generic type F
let discriminant = a1 * a1 - _4 * a2 * a0;
if discriminant < F::zero() {
Roots::No([])
} else {
let a2x2 = _2 * a2;
if discriminant == F::zero() {
Roots::One([-a1 / a2x2])
} else {
// To improve precision, do not use the smallest divisor.
// See https://people.csail.mit.edu/bkph/articles/Quadratics.pdf
let sq = discriminant.sqrt();
let (same_sign, diff_sign) = if a1 < F::zero() {
(-a1 + sq, -a1 - sq)
} else {
(-a1 - sq, -a1 + sq)
};
let (x1, x2) = if same_sign.abs() > a2x2.abs() {
let a0x2 = _2 * a0;
if diff_sign.abs() > a2x2.abs() {
// 2*a2 is the smallest divisor, do not use it
(a0x2 / same_sign, a0x2 / diff_sign)
} else {
// diff_sign is the smallest divisor, do not use it
(a0x2 / same_sign, same_sign / a2x2)
}
} else {
// 2*a2 is the greatest divisor, use it
(diff_sign / a2x2, same_sign / a2x2)
};
// Order roots
if x1 < x2 {
Roots::Two([x1, x2])
} else {
Roots::Two([x2, x1])
}
}
}
}
}
#[cfg(test)]
mod test {
use super::super::super::*;
#[test]
fn test_find_roots_quadratic() {
assert_eq!(find_roots_quadratic(0f32, 0f32, 0f32), Roots::One([0f32]));
assert_eq!(find_roots_quadratic(1f32, 0f32, 1f32), Roots::No([]));
assert_eq!(find_roots_quadratic(1f64, 0f64, -1f64), Roots::Two([-1f64, 1f64]));
}
#[test]
fn test_find_roots_quadratic_small_a2()
|
#[test]
fn test_find_roots_quadratic_big_a1() {
assert_eq!(find_roots_quadratic(1f32, -1e15f32, -1f32), Roots::Two([-1e-15f32, 1e15f32]));
assert_eq!(find_roots_quadratic(-1f32, 1e15f32, 1f32), Roots::Two([-1e-15f32, 1e15f32]));
}
}
|
{
assert_eq!(
find_roots_quadratic(1e-20f32, -1f32, -1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(
find_roots_quadratic(-1e-20f32, 1f32, 1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(find_roots_quadratic(1e-20f32, -1f32, 1f32), Roots::Two([1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, 1f32), Roots::Two([-1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, -1f32), Roots::Two([1f32, 1e20f32]));
}
|
identifier_body
|
quadratic.rs
|
// Copyright (c) 2015, Mikhail Vorotilov
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use super::super::FloatType;
use super::super::Roots;
/// Solves a quadratic equation a2*x^2 + a1*x + a0 = 0.
///
/// In case two roots are present, the first returned root is less than the second one.
///
/// # Examples
///
/// ```
/// use roots::Roots;
/// use roots::find_roots_quadratic;
///
/// let no_roots = find_roots_quadratic(1f32, 0f32, 1f32);
/// // Returns Roots::No([]) as 'x^2 + 1 = 0' has no roots
///
/// let one_root = find_roots_quadratic(1f64, 0f64, 0f64);
/// // Returns Roots::One([0f64]) as 'x^2 = 0' has one root 0
///
/// let two_roots = find_roots_quadratic(1f32, 0f32, -1f32);
/// // Returns Roots::Two([-1f32,1f32]) as 'x^2 - 1 = 0' has roots -1 and 1
/// ```
pub fn find_roots_quadratic<F: FloatType>(a2: F, a1: F, a0: F) -> Roots<F> {
// Handle non-standard cases
if a2 == F::zero() {
// a2 = 0; a1*x+a0=0; solve linear equation
super::linear::find_roots_linear(a1, a0)
} else {
let _2 = F::from(2i16);
let _4 = F::from(4i16);
// Rust lacks a simple way to convert an integer constant to generic type F
let discriminant = a1 * a1 - _4 * a2 * a0;
if discriminant < F::zero() {
Roots::No([])
} else {
let a2x2 = _2 * a2;
if discriminant == F::zero() {
Roots::One([-a1 / a2x2])
} else {
// To improve precision, do not use the smallest divisor.
// See https://people.csail.mit.edu/bkph/articles/Quadratics.pdf
let sq = discriminant.sqrt();
let (same_sign, diff_sign) = if a1 < F::zero() {
(-a1 + sq, -a1 - sq)
} else {
(-a1 - sq, -a1 + sq)
};
let (x1, x2) = if same_sign.abs() > a2x2.abs() {
let a0x2 = _2 * a0;
if diff_sign.abs() > a2x2.abs() {
// 2*a2 is the smallest divisor, do not use it
(a0x2 / same_sign, a0x2 / diff_sign)
} else {
// diff_sign is the smallest divisor, do not use it
(a0x2 / same_sign, same_sign / a2x2)
}
} else {
// 2*a2 is the greatest divisor, use it
(diff_sign / a2x2, same_sign / a2x2)
};
|
}
}
}
}
}
#[cfg(test)]
mod test {
use super::super::super::*;
#[test]
fn test_find_roots_quadratic() {
assert_eq!(find_roots_quadratic(0f32, 0f32, 0f32), Roots::One([0f32]));
assert_eq!(find_roots_quadratic(1f32, 0f32, 1f32), Roots::No([]));
assert_eq!(find_roots_quadratic(1f64, 0f64, -1f64), Roots::Two([-1f64, 1f64]));
}
#[test]
fn test_find_roots_quadratic_small_a2() {
assert_eq!(
find_roots_quadratic(1e-20f32, -1f32, -1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(
find_roots_quadratic(-1e-20f32, 1f32, 1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(find_roots_quadratic(1e-20f32, -1f32, 1f32), Roots::Two([1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, 1f32), Roots::Two([-1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, -1f32), Roots::Two([1f32, 1e20f32]));
}
#[test]
fn test_find_roots_quadratic_big_a1() {
assert_eq!(find_roots_quadratic(1f32, -1e15f32, -1f32), Roots::Two([-1e-15f32, 1e15f32]));
assert_eq!(find_roots_quadratic(-1f32, 1e15f32, 1f32), Roots::Two([-1e-15f32, 1e15f32]));
}
}
|
// Order roots
if x1 < x2 {
Roots::Two([x1, x2])
} else {
Roots::Two([x2, x1])
|
random_line_split
|
quadratic.rs
|
// Copyright (c) 2015, Mikhail Vorotilov
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use super::super::FloatType;
use super::super::Roots;
/// Solves a quadratic equation a2*x^2 + a1*x + a0 = 0.
///
/// In case two roots are present, the first returned root is less than the second one.
///
/// # Examples
///
/// ```
/// use roots::Roots;
/// use roots::find_roots_quadratic;
///
/// let no_roots = find_roots_quadratic(1f32, 0f32, 1f32);
/// // Returns Roots::No([]) as 'x^2 + 1 = 0' has no roots
///
/// let one_root = find_roots_quadratic(1f64, 0f64, 0f64);
/// // Returns Roots::One([0f64]) as 'x^2 = 0' has one root 0
///
/// let two_roots = find_roots_quadratic(1f32, 0f32, -1f32);
/// // Returns Roots::Two([-1f32,1f32]) as 'x^2 - 1 = 0' has roots -1 and 1
/// ```
pub fn find_roots_quadratic<F: FloatType>(a2: F, a1: F, a0: F) -> Roots<F> {
// Handle non-standard cases
if a2 == F::zero() {
// a2 = 0; a1*x+a0=0; solve linear equation
super::linear::find_roots_linear(a1, a0)
} else {
let _2 = F::from(2i16);
let _4 = F::from(4i16);
// Rust lacks a simple way to convert an integer constant to generic type F
let discriminant = a1 * a1 - _4 * a2 * a0;
if discriminant < F::zero() {
Roots::No([])
} else {
let a2x2 = _2 * a2;
if discriminant == F::zero() {
Roots::One([-a1 / a2x2])
} else {
// To improve precision, do not use the smallest divisor.
// See https://people.csail.mit.edu/bkph/articles/Quadratics.pdf
let sq = discriminant.sqrt();
let (same_sign, diff_sign) = if a1 < F::zero() {
(-a1 + sq, -a1 - sq)
} else {
(-a1 - sq, -a1 + sq)
};
let (x1, x2) = if same_sign.abs() > a2x2.abs() {
let a0x2 = _2 * a0;
if diff_sign.abs() > a2x2.abs() {
// 2*a2 is the smallest divisor, do not use it
(a0x2 / same_sign, a0x2 / diff_sign)
} else
|
} else {
// 2*a2 is the greatest divisor, use it
(diff_sign / a2x2, same_sign / a2x2)
};
// Order roots
if x1 < x2 {
Roots::Two([x1, x2])
} else {
Roots::Two([x2, x1])
}
}
}
}
}
#[cfg(test)]
mod test {
use super::super::super::*;
#[test]
fn test_find_roots_quadratic() {
assert_eq!(find_roots_quadratic(0f32, 0f32, 0f32), Roots::One([0f32]));
assert_eq!(find_roots_quadratic(1f32, 0f32, 1f32), Roots::No([]));
assert_eq!(find_roots_quadratic(1f64, 0f64, -1f64), Roots::Two([-1f64, 1f64]));
}
#[test]
fn test_find_roots_quadratic_small_a2() {
assert_eq!(
find_roots_quadratic(1e-20f32, -1f32, -1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(
find_roots_quadratic(-1e-20f32, 1f32, 1e-30f32),
Roots::Two([-1e-30f32, 1e20f32])
);
assert_eq!(find_roots_quadratic(1e-20f32, -1f32, 1f32), Roots::Two([1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, 1f32), Roots::Two([-1f32, 1e20f32]));
assert_eq!(find_roots_quadratic(-1e-20f32, 1f32, -1f32), Roots::Two([1f32, 1e20f32]));
}
#[test]
fn test_find_roots_quadratic_big_a1() {
assert_eq!(find_roots_quadratic(1f32, -1e15f32, -1f32), Roots::Two([-1e-15f32, 1e15f32]));
assert_eq!(find_roots_quadratic(-1f32, 1e15f32, 1f32), Roots::Two([-1e-15f32, 1e15f32]));
}
}
|
{
// diff_sign is the smallest divisor, do not use it
(a0x2 / same_sign, same_sign / a2x2)
}
|
conditional_block
|
prefs.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use num_cpus;
use opts;
use resource_files::resources_dir_path;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs() {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(())
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => {
Some(value)
},
_ => None
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => {
Some(&value)
},
_ => None
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => {
Json::Boolean(x)
},
PrefValue::String(ref x) => {
Json::String(x.clone())
},
PrefValue::Number(x) => {
Json::F64(x)
},
PrefValue::Missing => Json::Null
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>),
WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>)
}
impl Pref {
pub fn new(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => {
match *override_value {
Some(ref x) => x,
None => default
}
}
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => {
*pref_value = Arc::new(value)
},
Pref::WithDefault(_, ref mut override_value) => {
*override_value = Some(Arc::new(value))
}
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json {
self.value().to_json()
}
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set("layout.threads", PrefValue::Number(
max(num_cpus::get() * 3 / 4, 1) as f64));
prefs
}
pub fn read_prefs_from_file<T>(mut file: T)
-> Result<HashMap<String, Pref>, ()> where T: Read {
let json = Json::from_reader(&mut file).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!("Ignoring non-boolean/string/i64 preference value for {:?}", name),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
}
None => {
let mut path = default_config_dir().unwrap();
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(file) = File::open(path) {
if let Ok(prefs) = read_prefs_from_file(file) {
PREFS.extend(prefs);
}
} else {
writeln!(&mut stderr(), "Error opening prefs.json from config directory")
.expect("failed printing to stderr");
}
}
fn read_prefs() -> Result<HashMap<String, Pref>, ()> {
let mut path = resources_dir_path().map_err(|_| ())?;
path.push("prefs.json");
let file = File::open(path).or_else(|e| {
writeln!(&mut stderr(), "Error opening preferences: {:?}.", e)
.expect("failed printing to stderr");
Err(())
})?;
read_prefs_from_file(file)
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0.read().unwrap().get(name).map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0.read().unwrap().keys().cloned().collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled").as_boolean().unwrap_or(false)
}
pub fn is_webgl2_enabled(&self) -> bool
|
}
|
{
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
|
identifier_body
|
prefs.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use num_cpus;
use opts;
use resource_files::resources_dir_path;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs() {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(())
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => {
Some(value)
},
_ => None
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => {
Some(&value)
},
_ => None
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => {
Json::Boolean(x)
},
PrefValue::String(ref x) => {
Json::String(x.clone())
},
PrefValue::Number(x) => {
Json::F64(x)
},
PrefValue::Missing => Json::Null
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>),
WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>)
}
impl Pref {
pub fn new(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => {
|
Some(ref x) => x,
None => default
}
}
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => {
*pref_value = Arc::new(value)
},
Pref::WithDefault(_, ref mut override_value) => {
*override_value = Some(Arc::new(value))
}
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json {
self.value().to_json()
}
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set("layout.threads", PrefValue::Number(
max(num_cpus::get() * 3 / 4, 1) as f64));
prefs
}
pub fn read_prefs_from_file<T>(mut file: T)
-> Result<HashMap<String, Pref>, ()> where T: Read {
let json = Json::from_reader(&mut file).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!("Ignoring non-boolean/string/i64 preference value for {:?}", name),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
}
None => {
let mut path = default_config_dir().unwrap();
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(file) = File::open(path) {
if let Ok(prefs) = read_prefs_from_file(file) {
PREFS.extend(prefs);
}
} else {
writeln!(&mut stderr(), "Error opening prefs.json from config directory")
.expect("failed printing to stderr");
}
}
fn read_prefs() -> Result<HashMap<String, Pref>, ()> {
let mut path = resources_dir_path().map_err(|_| ())?;
path.push("prefs.json");
let file = File::open(path).or_else(|e| {
writeln!(&mut stderr(), "Error opening preferences: {:?}.", e)
.expect("failed printing to stderr");
Err(())
})?;
read_prefs_from_file(file)
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0.read().unwrap().get(name).map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0.read().unwrap().keys().cloned().collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled").as_boolean().unwrap_or(false)
}
pub fn is_webgl2_enabled(&self) -> bool {
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
}
|
match *override_value {
|
random_line_split
|
prefs.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use basedir::default_config_dir;
use num_cpus;
use opts;
use resource_files::resources_dir_path;
use rustc_serialize::json::{Json, ToJson};
use std::borrow::ToOwned;
use std::cmp::max;
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Write, stderr};
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
lazy_static! {
pub static ref PREFS: Preferences = {
let defaults = default_prefs();
if let Ok(prefs) = read_prefs() {
defaults.extend(prefs);
}
defaults
};
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub enum PrefValue {
Boolean(bool),
String(String),
Number(f64),
Missing
}
impl PrefValue {
pub fn from_json(data: Json) -> Result<PrefValue, ()> {
let value = match data {
Json::Boolean(x) => PrefValue::Boolean(x),
Json::String(x) => PrefValue::String(x),
Json::F64(x) => PrefValue::Number(x),
Json::I64(x) => PrefValue::Number(x as f64),
Json::U64(x) => PrefValue::Number(x as f64),
_ => return Err(())
};
Ok(value)
}
pub fn as_boolean(&self) -> Option<bool> {
match *self {
PrefValue::Boolean(value) => {
Some(value)
},
_ => None
}
}
pub fn as_string(&self) -> Option<&str> {
match *self {
PrefValue::String(ref value) => {
Some(&value)
},
_ => None
}
}
pub fn as_i64(&self) -> Option<i64> {
match *self {
PrefValue::Number(x) => Some(x as i64),
_ => None,
}
}
pub fn as_u64(&self) -> Option<u64> {
match *self {
PrefValue::Number(x) => Some(x as u64),
_ => None,
}
}
}
impl ToJson for PrefValue {
fn to_json(&self) -> Json {
match *self {
PrefValue::Boolean(x) => {
Json::Boolean(x)
},
PrefValue::String(ref x) => {
Json::String(x.clone())
},
PrefValue::Number(x) => {
Json::F64(x)
},
PrefValue::Missing => Json::Null
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum Pref {
NoDefault(Arc<PrefValue>),
WithDefault(Arc<PrefValue>, Option<Arc<PrefValue>>)
}
impl Pref {
pub fn
|
(value: PrefValue) -> Pref {
Pref::NoDefault(Arc::new(value))
}
fn new_default(value: PrefValue) -> Pref {
Pref::WithDefault(Arc::new(value), None)
}
fn from_json(data: Json) -> Result<Pref, ()> {
let value = PrefValue::from_json(data)?;
Ok(Pref::new_default(value))
}
pub fn value(&self) -> &Arc<PrefValue> {
match *self {
Pref::NoDefault(ref x) => x,
Pref::WithDefault(ref default, ref override_value) => {
match *override_value {
Some(ref x) => x,
None => default
}
}
}
}
fn set(&mut self, value: PrefValue) {
// TODO - this should error if we try to override a pref of one type
// with a value of a different type
match *self {
Pref::NoDefault(ref mut pref_value) => {
*pref_value = Arc::new(value)
},
Pref::WithDefault(_, ref mut override_value) => {
*override_value = Some(Arc::new(value))
}
}
}
}
impl ToJson for Pref {
fn to_json(&self) -> Json {
self.value().to_json()
}
}
pub fn default_prefs() -> Preferences {
let prefs = Preferences(Arc::new(RwLock::new(HashMap::new())));
prefs.set("layout.threads", PrefValue::Number(
max(num_cpus::get() * 3 / 4, 1) as f64));
prefs
}
pub fn read_prefs_from_file<T>(mut file: T)
-> Result<HashMap<String, Pref>, ()> where T: Read {
let json = Json::from_reader(&mut file).or_else(|e| {
println!("Ignoring invalid JSON in preferences: {:?}.", e);
Err(())
})?;
let mut prefs = HashMap::new();
if let Json::Object(obj) = json {
for (name, value) in obj.into_iter() {
match Pref::from_json(value) {
Ok(x) => {
prefs.insert(name, x);
},
Err(_) => println!("Ignoring non-boolean/string/i64 preference value for {:?}", name),
}
}
}
Ok(prefs)
}
pub fn add_user_prefs() {
match opts::get().config_dir {
Some(ref config_path) => {
let mut path = PathBuf::from(config_path);
init_user_prefs(&mut path);
}
None => {
let mut path = default_config_dir().unwrap();
if path.join("prefs.json").exists() {
init_user_prefs(&mut path);
}
}
}
}
fn init_user_prefs(path: &mut PathBuf) {
path.push("prefs.json");
if let Ok(file) = File::open(path) {
if let Ok(prefs) = read_prefs_from_file(file) {
PREFS.extend(prefs);
}
} else {
writeln!(&mut stderr(), "Error opening prefs.json from config directory")
.expect("failed printing to stderr");
}
}
fn read_prefs() -> Result<HashMap<String, Pref>, ()> {
let mut path = resources_dir_path().map_err(|_| ())?;
path.push("prefs.json");
let file = File::open(path).or_else(|e| {
writeln!(&mut stderr(), "Error opening preferences: {:?}.", e)
.expect("failed printing to stderr");
Err(())
})?;
read_prefs_from_file(file)
}
pub struct Preferences(Arc<RwLock<HashMap<String, Pref>>>);
impl Preferences {
pub fn get(&self, name: &str) -> Arc<PrefValue> {
self.0.read().unwrap().get(name).map_or(Arc::new(PrefValue::Missing), |x| x.value().clone())
}
pub fn cloned(&self) -> HashMap<String, Pref> {
self.0.read().unwrap().clone()
}
pub fn set(&self, name: &str, value: PrefValue) {
let mut prefs = self.0.write().unwrap();
if let Some(pref) = prefs.get_mut(name) {
pref.set(value);
return;
}
prefs.insert(name.to_owned(), Pref::new(value));
}
pub fn reset(&self, name: &str) -> Arc<PrefValue> {
let mut prefs = self.0.write().unwrap();
let result = match prefs.get_mut(name) {
None => return Arc::new(PrefValue::Missing),
Some(&mut Pref::NoDefault(_)) => Arc::new(PrefValue::Missing),
Some(&mut Pref::WithDefault(ref default, ref mut set_value)) => {
*set_value = None;
default.clone()
},
};
if *result == PrefValue::Missing {
prefs.remove(name);
}
result
}
pub fn reset_all(&self) {
let names = {
self.0.read().unwrap().keys().cloned().collect::<Vec<String>>()
};
for name in names.iter() {
self.reset(name);
}
}
pub fn extend(&self, extension: HashMap<String, Pref>) {
self.0.write().unwrap().extend(extension);
}
pub fn is_webvr_enabled(&self) -> bool {
self.get("dom.webvr.enabled").as_boolean().unwrap_or(false)
}
pub fn is_dom_to_texture_enabled(&self) -> bool {
self.get("dom.webgl.dom_to_texture.enabled").as_boolean().unwrap_or(false)
}
pub fn is_webgl2_enabled(&self) -> bool {
self.get("dom.webgl2.enabled").as_boolean().unwrap_or(false)
}
}
|
new
|
identifier_name
|
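The prefs.rs rows above define the whole Preferences API behind the crate's PREFS global. A minimal usage sketch, assuming it is compiled inside that same crate so PREFS, Pref and PrefValue are in scope; "layout.threads" is the one name actually seeded by default_prefs(), and everything below follows directly from set(), get(), as_u64() and reset() as written above.
fn preferences_round_trip() {
    // Seeded by default_prefs() via set(), i.e. stored as Pref::NoDefault.
    let threads = PREFS.get("layout.threads").as_u64();
    println!("layout.threads = {:?}", threads);
    // Overrides go through PrefValue::Number and come back via as_u64().
    PREFS.set("layout.threads", PrefValue::Number(2.0));
    assert_eq!(PREFS.get("layout.threads").as_u64(), Some(2));
    // reset() restores the JSON-supplied default when one exists; a pref that
    // was only ever set() (Pref::NoDefault) is removed instead, so a later
    // get() yields PrefValue::Missing.
    PREFS.reset("layout.threads");
}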
exponential.rs
|
use std::num::{sin, cos};
use extra::complex::Cmplx;
use nodes;
use eval::array_helpers::{simple_monadic_array};
use eval::eval::{AplFloat, AplInteger, AplComplex, AplArray, Value, eval_monadic};
use math_constants::e;
pub fn exponential(first: &Value) -> Result<~Value, ~str> {
match first {
&AplFloat(val) => {
Ok(~AplFloat(val.exp()))
},
&AplInteger(val) => {
Ok(~AplFloat((val as f64).exp()))
},
&AplComplex(c) => {
let powed = e.pow(&c.re);
let left = cos(c.im);
let right = sin(c.im);
let complex = Cmplx::new(left, right);
|
}
}
}
pub fn eval_exponential(left: &nodes::Node) -> Result<~Value, ~str> {
eval_monadic(exponential, left)
}
|
let result = Cmplx::new(powed, 0.0) * complex;
Ok(~AplComplex(result))
},
&AplArray(ref _depth, ref _dimensions, ref _values) => {
simple_monadic_array(exponential, first)
|
random_line_split
|
exponential.rs
|
use std::num::{sin, cos};
use extra::complex::Cmplx;
use nodes;
use eval::array_helpers::{simple_monadic_array};
use eval::eval::{AplFloat, AplInteger, AplComplex, AplArray, Value, eval_monadic};
use math_constants::e;
pub fn
|
(first: &Value) -> Result<~Value, ~str> {
match first {
&AplFloat(val) => {
Ok(~AplFloat(val.exp()))
},
&AplInteger(val) => {
Ok(~AplFloat((val as f64).exp()))
},
&AplComplex(c) => {
let powed = e.pow(&c.re);
let left = cos(c.im);
let right = sin(c.im);
let complex = Cmplx::new(left, right);
let result = Cmplx::new(powed, 0.0) * complex;
Ok(~AplComplex(result))
},
&AplArray(ref _depth, ref _dimensions, ref _values) => {
simple_monadic_array(exponential, first)
}
}
}
pub fn eval_exponential(left: &nodes::Node) -> Result<~Value, ~str> {
eval_monadic(exponential, left)
}
|
exponential
|
identifier_name
|
exponential.rs
|
use std::num::{sin, cos};
use extra::complex::Cmplx;
use nodes;
use eval::array_helpers::{simple_monadic_array};
use eval::eval::{AplFloat, AplInteger, AplComplex, AplArray, Value, eval_monadic};
use math_constants::e;
pub fn exponential(first: &Value) -> Result<~Value, ~str> {
match first {
&AplFloat(val) =>
|
,
&AplInteger(val) => {
Ok(~AplFloat((val as f64).exp()))
},
&AplComplex(c) => {
let powed = e.pow(&c.re);
let left = cos(c.im);
let right = sin(c.im);
let complex = Cmplx::new(left, right);
let result = Cmplx::new(powed, 0.0) * complex;
Ok(~AplComplex(result))
},
&AplArray(ref _depth, ref _dimensions, ref _values) => {
simple_monadic_array(exponential, first)
}
}
}
pub fn eval_exponential(left: &nodes::Node) -> Result<~Value, ~str> {
eval_monadic(exponential, left)
}
|
{
Ok(~AplFloat(val.exp()))
}
|
conditional_block
|
exponential.rs
|
use std::num::{sin, cos};
use extra::complex::Cmplx;
use nodes;
use eval::array_helpers::{simple_monadic_array};
use eval::eval::{AplFloat, AplInteger, AplComplex, AplArray, Value, eval_monadic};
use math_constants::e;
pub fn exponential(first: &Value) -> Result<~Value, ~str> {
match first {
&AplFloat(val) => {
Ok(~AplFloat(val.exp()))
},
&AplInteger(val) => {
Ok(~AplFloat((val as f64).exp()))
},
&AplComplex(c) => {
let powed = e.pow(&c.re);
let left = cos(c.im);
let right = sin(c.im);
let complex = Cmplx::new(left, right);
let result = Cmplx::new(powed, 0.0) * complex;
Ok(~AplComplex(result))
},
&AplArray(ref _depth, ref _dimensions, ref _values) => {
simple_monadic_array(exponential, first)
}
}
}
pub fn eval_exponential(left: &nodes::Node) -> Result<~Value, ~str>
|
{
eval_monadic(exponential, left)
}
|
identifier_body
|
|
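The exponential.rs rows compute e^z for a complex z via Euler's formula, e^(a+bi) = e^a (cos b + i sin b), but they target pre-1.0 Rust (the ~ pointers and extra::complex no longer exist). A self-contained sketch of the same computation, assuming the num-complex crate as a stand-in for the old Cmplx type:
use num_complex::Complex64;
fn main() {
    let z = Complex64::new(1.0, 0.5);
    // Mirrors the snippet: raise e to the real part, then multiply by
    // (cos im + i sin im); compare against the crate's own exp().
    let by_hand = Complex64::new(z.re.exp(), 0.0) * Complex64::new(z.im.cos(), z.im.sin());
    assert!((by_hand - z.exp()).norm() < 1e-12);
    println!("exp({}) = {}", z, z.exp());
}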
foo.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a test which attempts to blow out the system limit with how many
// arguments can be passed to a process. This'll successively call rustc with
// larger and larger argument lists in an attempt to find one that's way too
// big for the system at hand. This file itself is then used as a "linker" to
// detect when the process creation succeeds.
//
// Eventually we should see an argument that looks like `@` as we switch from
// passing literal arguments to passing everything in the file.
use std::env;
use std::fs::{self, File};
use std::io::{BufWriter, Write, Read};
use std::path::PathBuf;
use std::process::Command;
fn main()
|
}
for j in 0..i {
writeln!(f, "#[link(name = \"{}{}\")]", lib_name, j).unwrap();
}
writeln!(f, "extern {{}}\nfn main() {{}}").unwrap();
f.into_inner().unwrap();
drop(fs::remove_file(&ok));
let output = Command::new(&rustc)
.arg(&file)
.arg("-C").arg(&me_as_linker)
.arg("--out-dir").arg(&tmpdir)
.env("YOU_ARE_A_LINKER", "1")
.output()
.unwrap();
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
panic!("status: {}\nstdout:\n{}\nstderr:\n{}",
output.status,
String::from_utf8_lossy(&output.stdout),
stderr.lines().map(|l| {
if l.len() > 200 {
format!("{}...\n", &l[..200])
} else {
format!("{}\n", l)
}
}).collect::<String>());
}
if !ok.exists() {
continue
}
let mut contents = Vec::new();
File::open(&ok).unwrap().read_to_end(&mut contents).unwrap();
for j in 0..i {
let exp = format!("{}{}", lib_name, j);
let exp = if cfg!(target_env = "msvc") {
let mut out = Vec::with_capacity(exp.len() * 2);
for c in exp.encode_utf16() {
// encode in little endian
out.push(c as u8);
out.push((c >> 8) as u8);
}
out
} else {
exp.into_bytes()
};
assert!(contents.windows(exp.len()).any(|w| w == &exp[..]));
}
break
}
}
|
{
let tmpdir = PathBuf::from(env::var_os("TMPDIR").unwrap());
let ok = tmpdir.join("ok");
if env::var("YOU_ARE_A_LINKER").is_ok() {
if let Some(file) = env::args_os().find(|a| a.to_string_lossy().contains("@")) {
let file = file.to_str().expect("non-utf8 file argument");
fs::copy(&file[1..], &ok).unwrap();
}
return
}
let rustc = env::var_os("RUSTC").unwrap_or("rustc".into());
let me_as_linker = format!("linker={}", env::current_exe().unwrap().display());
for i in (1..).map(|i| i * 100) {
println!("attempt: {}", i);
let file = tmpdir.join("bar.rs");
let mut f = BufWriter::new(File::create(&file).unwrap());
let mut lib_name = String::new();
for _ in 0..i {
lib_name.push_str("foo");
|
identifier_body
|
foo.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a test which attempts to blow out the system limit with how many
// arguments can be passed to a process. This'll successively call rustc with
// larger and larger argument lists in an attempt to find one that's way too
// big for the system at hand. This file itself is then used as a "linker" to
// detect when the process creation succeeds.
//
// Eventually we should see an argument that looks like `@` as we switch from
// passing literal arguments to passing everything in the file.
use std::env;
use std::fs::{self, File};
use std::io::{BufWriter, Write, Read};
use std::path::PathBuf;
use std::process::Command;
fn main() {
let tmpdir = PathBuf::from(env::var_os("TMPDIR").unwrap());
let ok = tmpdir.join("ok");
if env::var("YOU_ARE_A_LINKER").is_ok() {
if let Some(file) = env::args_os().find(|a| a.to_string_lossy().contains("@")) {
let file = file.to_str().expect("non-utf8 file argument");
fs::copy(&file[1..], &ok).unwrap();
}
return
}
let rustc = env::var_os("RUSTC").unwrap_or("rustc".into());
let me_as_linker = format!("linker={}", env::current_exe().unwrap().display());
for i in (1..).map(|i| i * 100) {
println!("attempt: {}", i);
let file = tmpdir.join("bar.rs");
let mut f = BufWriter::new(File::create(&file).unwrap());
let mut lib_name = String::new();
for _ in 0..i {
lib_name.push_str("foo");
}
for j in 0..i {
writeln!(f, "#[link(name = \"{}{}\")]", lib_name, j).unwrap();
}
writeln!(f, "extern {{}}\nfn main() {{}}").unwrap();
f.into_inner().unwrap();
drop(fs::remove_file(&ok));
let output = Command::new(&rustc)
.arg(&file)
.arg("-C").arg(&me_as_linker)
.arg("--out-dir").arg(&tmpdir)
.env("YOU_ARE_A_LINKER", "1")
.output()
.unwrap();
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
panic!("status: {}\nstdout:\n{}\nstderr:\n{}",
output.status,
String::from_utf8_lossy(&output.stdout),
stderr.lines().map(|l| {
if l.len() > 200 {
format!("{}...\n", &l[..200])
} else {
format!("{}\n", l)
}
}).collect::<String>());
}
if !ok.exists() {
continue
}
let mut contents = Vec::new();
File::open(&ok).unwrap().read_to_end(&mut contents).unwrap();
for j in 0..i {
let exp = format!("{}{}", lib_name, j);
let exp = if cfg!(target_env = "msvc")
|
else {
exp.into_bytes()
};
assert!(contents.windows(exp.len()).any(|w| w == &exp[..]));
}
break
}
}
|
{
let mut out = Vec::with_capacity(exp.len() * 2);
for c in exp.encode_utf16() {
// encode in little endian
out.push(c as u8);
out.push((c >> 8) as u8);
}
out
}
|
conditional_block
|
foo.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a test which attempts to blow out the system limit with how many
// arguments can be passed to a process. This'll successively call rustc with
// larger and larger argument lists in an attempt to find one that's way too
// big for the system at hand. This file itself is then used as a "linker" to
// detect when the process creation succeeds.
//
// Eventually we should see an argument that looks like `@` as we switch from
// passing literal arguments to passing everything in the file.
use std::env;
use std::fs::{self, File};
use std::io::{BufWriter, Write, Read};
use std::path::PathBuf;
use std::process::Command;
fn
|
() {
let tmpdir = PathBuf::from(env::var_os("TMPDIR").unwrap());
let ok = tmpdir.join("ok");
if env::var("YOU_ARE_A_LINKER").is_ok() {
if let Some(file) = env::args_os().find(|a| a.to_string_lossy().contains("@")) {
let file = file.to_str().expect("non-utf8 file argument");
fs::copy(&file[1..], &ok).unwrap();
}
return
}
let rustc = env::var_os("RUSTC").unwrap_or("rustc".into());
let me_as_linker = format!("linker={}", env::current_exe().unwrap().display());
for i in (1..).map(|i| i * 100) {
println!("attempt: {}", i);
let file = tmpdir.join("bar.rs");
let mut f = BufWriter::new(File::create(&file).unwrap());
let mut lib_name = String::new();
for _ in 0..i {
lib_name.push_str("foo");
}
for j in 0..i {
writeln!(f, "#[link(name = \"{}{}\")]", lib_name, j).unwrap();
}
writeln!(f, "extern {{}}\nfn main() {{}}").unwrap();
f.into_inner().unwrap();
drop(fs::remove_file(&ok));
let output = Command::new(&rustc)
.arg(&file)
.arg("-C").arg(&me_as_linker)
.arg("--out-dir").arg(&tmpdir)
.env("YOU_ARE_A_LINKER", "1")
.output()
.unwrap();
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
panic!("status: {}\nstdout:\n{}\nstderr:\n{}",
output.status,
String::from_utf8_lossy(&output.stdout),
stderr.lines().map(|l| {
if l.len() > 200 {
format!("{}...\n", &l[..200])
} else {
format!("{}\n", l)
}
}).collect::<String>());
}
if !ok.exists() {
continue
}
let mut contents = Vec::new();
File::open(&ok).unwrap().read_to_end(&mut contents).unwrap();
for j in 0..i {
let exp = format!("{}{}", lib_name, j);
let exp = if cfg!(target_env = "msvc") {
let mut out = Vec::with_capacity(exp.len() * 2);
for c in exp.encode_utf16() {
// encode in little endian
out.push(c as u8);
out.push((c >> 8) as u8);
}
out
} else {
exp.into_bytes()
};
assert!(contents.windows(exp.len()).any(|w| w == &exp[..]));
}
break
}
}
|
main
|
identifier_name
|
foo.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a test which attempts to blow out the system limit with how many
// arguments can be passed to a process. This'll successively call rustc with
// larger and larger argument lists in an attempt to find one that's way too
// big for the system at hand. This file itself is then used as a "linker" to
// detect when the process creation succeeds.
//
// Eventually we should see an argument that looks like `@` as we switch from
// passing literal arguments to passing everything in the file.
use std::env;
use std::fs::{self, File};
use std::io::{BufWriter, Write, Read};
use std::path::PathBuf;
use std::process::Command;
fn main() {
let tmpdir = PathBuf::from(env::var_os("TMPDIR").unwrap());
let ok = tmpdir.join("ok");
if env::var("YOU_ARE_A_LINKER").is_ok() {
if let Some(file) = env::args_os().find(|a| a.to_string_lossy().contains("@")) {
let file = file.to_str().expect("non-utf8 file argument");
fs::copy(&file[1..], &ok).unwrap();
|
}
let rustc = env::var_os("RUSTC").unwrap_or("rustc".into());
let me_as_linker = format!("linker={}", env::current_exe().unwrap().display());
for i in (1..).map(|i| i * 100) {
println!("attempt: {}", i);
let file = tmpdir.join("bar.rs");
let mut f = BufWriter::new(File::create(&file).unwrap());
let mut lib_name = String::new();
for _ in 0..i {
lib_name.push_str("foo");
}
for j in 0..i {
writeln!(f, "#[link(name = \"{}{}\")]", lib_name, j).unwrap();
}
writeln!(f, "extern {{}}\nfn main() {{}}").unwrap();
f.into_inner().unwrap();
drop(fs::remove_file(&ok));
let output = Command::new(&rustc)
.arg(&file)
.arg("-C").arg(&me_as_linker)
.arg("--out-dir").arg(&tmpdir)
.env("YOU_ARE_A_LINKER", "1")
.output()
.unwrap();
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
panic!("status: {}\nstdout:\n{}\nstderr:\n{}",
output.status,
String::from_utf8_lossy(&output.stdout),
stderr.lines().map(|l| {
if l.len() > 200 {
format!("{}...\n", &l[..200])
} else {
format!("{}\n", l)
}
}).collect::<String>());
}
if !ok.exists() {
continue
}
let mut contents = Vec::new();
File::open(&ok).unwrap().read_to_end(&mut contents).unwrap();
for j in 0..i {
let exp = format!("{}{}", lib_name, j);
let exp = if cfg!(target_env = "msvc") {
let mut out = Vec::with_capacity(exp.len() * 2);
for c in exp.encode_utf16() {
// encode in little endian
out.push(c as u8);
out.push((c >> 8) as u8);
}
out
} else {
exp.into_bytes()
};
assert!(contents.windows(exp.len()).any(|w| w == &exp[..]));
}
break
}
}
|
}
return
|
random_line_split
|
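On MSVC targets the foo.rs test re-encodes each expected library name as little-endian UTF-16 before scanning the captured linker command line. A stand-alone sketch of just that check; the contents buffer is a made-up stand-in for the bytes the fake linker copies out of the @-file:
fn main() {
    let exp = "foo0";
    // encode in little endian, exactly as the test does
    let exp_utf16le: Vec<u8> = exp
        .encode_utf16()
        .flat_map(|c| [c as u8, (c >> 8) as u8])
        .collect();
    // stand-in for the captured command line
    let mut contents = b"prefix ".to_vec();
    contents.extend_from_slice(&exp_utf16le);
    contents.extend_from_slice(b" suffix");
    assert!(contents.windows(exp_utf16le.len()).any(|w| w == &exp_utf16le[..]));
}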
import-glob-circular.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: unresolved
#[feature(globs)];
mod circ1 {
pub use circ2::f2;
pub fn f1() { println!("f1"); }
pub fn common() -> uint { return 0u; }
}
|
}
mod test {
use circ1::*;
fn test() { f1066(); }
}
|
mod circ2 {
pub use circ1::f1;
pub fn f2() { println!("f2"); }
pub fn common() -> uint { return 1u; }
|
random_line_split
|
import-glob-circular.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: unresolved
#[feature(globs)];
mod circ1 {
pub use circ2::f2;
pub fn
|
() { println!("f1"); }
pub fn common() -> uint { return 0u; }
}
mod circ2 {
pub use circ1::f1;
pub fn f2() { println!("f2"); }
pub fn common() -> uint { return 1u; }
}
mod test {
use circ1::*;
fn test() { f1066(); }
}
|
f1
|
identifier_name
|
import-glob-circular.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: unresolved
#[feature(globs)];
mod circ1 {
pub use circ2::f2;
pub fn f1()
|
pub fn common() -> uint { return 0u; }
}
mod circ2 {
pub use circ1::f1;
pub fn f2() { println!("f2"); }
pub fn common() -> uint { return 1u; }
}
mod test {
use circ1::*;
fn test() { f1066(); }
}
|
{ println!("f1"); }
|
identifier_body
|
reverse_words_str.rs
|
// Implements http://rosettacode.org/wiki/Reverse_words_in_a_string
#![feature(str_words)]
fn rev_words(line: &str) -> String {
line.words().rev().collect::<Vec<&str>>().connect(" ")
}
fn rev_words_on_lines(text: &str) -> String {
text.lines().map(rev_words).collect::<Vec<String>>().connect("\n")
}
#[cfg(not(test))]
fn main() {
let text = "---------- Ice and Fire ------------
fire, in end will world the say Some
ice. in say Some
desire of tasted I've what From
fire. favor who those with hold I
... elided paragraph last...
Frost Robert -----------------------";
println!("{}", rev_words_on_lines(text));
}
#[test]
fn test_rev_words()
|
#[test]
fn test_rev_words_on_lines() {
// The tests from test_rev_words should have the same results, so
// we include them.
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat"),
// Multiple Lines
("a b\nb a", "b a\na b"),
("a b\nc d\ne f", "b a\nd c\nf e")];
for &(input, expected) in &tests {
let output = rev_words_on_lines(input);
assert_eq!(expected, output);
}
}
|
{
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat")];
for &(input, expected) in &tests {
let output = rev_words(input);
assert_eq!(expected, output);
}
}
|
identifier_body
|
reverse_words_str.rs
|
// Implements http://rosettacode.org/wiki/Reverse_words_in_a_string
#![feature(str_words)]
fn rev_words(line: &str) -> String {
line.words().rev().collect::<Vec<&str>>().connect(" ")
|
text.lines().map(rev_words).collect::<Vec<String>>().connect("\n")
}
#[cfg(not(test))]
fn main() {
let text = "---------- Ice and Fire ------------
fire, in end will world the say Some
ice. in say Some
desire of tasted I've what From
fire. favor who those with hold I
... elided paragraph last...
Frost Robert -----------------------";
println!("{}", rev_words_on_lines(text));
}
#[test]
fn test_rev_words() {
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat")];
for &(input, expected) in &tests {
let output = rev_words(input);
assert_eq!(expected, output);
}
}
#[test]
fn test_rev_words_on_lines() {
// The tests from test_rev_words should have the same results, so
// we include them.
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat"),
// Multiple Lines
("a b\nb a", "b a\na b"),
("a b\nc d\ne f", "b a\nd c\nf e")];
for &(input, expected) in &tests {
let output = rev_words_on_lines(input);
assert_eq!(expected, output);
}
}
|
}
fn rev_words_on_lines(text: &str) -> String {
|
random_line_split
|
reverse_words_str.rs
|
// Implements http://rosettacode.org/wiki/Reverse_words_in_a_string
#![feature(str_words)]
fn rev_words(line: &str) -> String {
line.words().rev().collect::<Vec<&str>>().connect(" ")
}
fn rev_words_on_lines(text: &str) -> String {
text.lines().map(rev_words).collect::<Vec<String>>().connect("\n")
}
#[cfg(not(test))]
fn main() {
let text = "---------- Ice and Fire ------------
fire, in end will world the say Some
ice. in say Some
desire of tasted I've what From
fire. favor who those with hold I
... elided paragraph last...
Frost Robert -----------------------";
println!("{}", rev_words_on_lines(text));
}
#[test]
fn
|
() {
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat")];
for &(input, expected) in &tests {
let output = rev_words(input);
assert_eq!(expected, output);
}
}
#[test]
fn test_rev_words_on_lines() {
// The tests from test_rev_words should have the same results, so
// we include them.
let tests = [("", ""),
("a", "a"),
("a b", "b a"),
("cat dog", "dog cat"),
// According to the problem, multiple spaces can be
// compressed into a single space.
("cat dog", "dog cat"),
("cat dog frog", "frog dog cat"),
// Multiple Lines
("a b\nb a", "b a\na b"),
("a b\nc d\ne f", "b a\nd c\nf e")];
for &(input, expected) in &tests {
let output = rev_words_on_lines(input);
assert_eq!(expected, output);
}
}
|
test_rev_words
|
identifier_name
|
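reverse_words_str.rs relies on APIs that never reached stable Rust (the str_words feature, str::words, connect). A stable-Rust equivalent of the same two helpers, offered only for comparison and not part of the original file:
fn rev_words(line: &str) -> String {
    // split_whitespace() also collapses runs of spaces, matching the task.
    line.split_whitespace().rev().collect::<Vec<_>>().join(" ")
}
fn rev_words_on_lines(text: &str) -> String {
    text.lines().map(rev_words).collect::<Vec<_>>().join("\n")
}
fn main() {
    assert_eq!(rev_words("cat  dog"), "dog cat");
    assert_eq!(rev_words_on_lines("a b\nc d"), "b a\nd c");
    println!("{}", rev_words("fire, in end will world the say Some"));
}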
htmlbrelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBRElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBRElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBRElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
#[must_root]
pub struct HTMLBRElement {
pub htmlelement: HTMLElement,
}
impl HTMLBRElementDerived for EventTarget {
fn is_htmlbrelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLBRElementTypeId))
}
}
impl HTMLBRElement {
pub fn new_inherited(localName: DOMString, document: JSRef<Document>) -> HTMLBRElement {
HTMLBRElement {
htmlelement: HTMLElement::new_inherited(HTMLBRElementTypeId, localName, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, document: JSRef<Document>) -> Temporary<HTMLBRElement> {
let element = HTMLBRElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLBRElementBinding::Wrap)
}
}
impl Reflectable for HTMLBRElement {
fn reflector<'a>(&'a self) -> &'a Reflector
|
}
|
{
self.htmlelement.reflector()
}
|
identifier_body
|
htmlbrelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBRElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBRElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBRElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
#[must_root]
pub struct HTMLBRElement {
pub htmlelement: HTMLElement,
}
impl HTMLBRElementDerived for EventTarget {
fn is_htmlbrelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLBRElementTypeId))
}
}
impl HTMLBRElement {
pub fn
|
(localName: DOMString, document: JSRef<Document>) -> HTMLBRElement {
HTMLBRElement {
htmlelement: HTMLElement::new_inherited(HTMLBRElementTypeId, localName, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, document: JSRef<Document>) -> Temporary<HTMLBRElement> {
let element = HTMLBRElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLBRElementBinding::Wrap)
}
}
impl Reflectable for HTMLBRElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
new_inherited
|
identifier_name
|
htmlbrelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLBRElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLBRElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLBRElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
#[must_root]
pub struct HTMLBRElement {
pub htmlelement: HTMLElement,
|
impl HTMLBRElementDerived for EventTarget {
fn is_htmlbrelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLBRElementTypeId))
}
}
impl HTMLBRElement {
pub fn new_inherited(localName: DOMString, document: JSRef<Document>) -> HTMLBRElement {
HTMLBRElement {
htmlelement: HTMLElement::new_inherited(HTMLBRElementTypeId, localName, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, document: JSRef<Document>) -> Temporary<HTMLBRElement> {
let element = HTMLBRElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLBRElementBinding::Wrap)
}
}
impl Reflectable for HTMLBRElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
}
|
random_line_split
|
car.rs
|
extern crate quack;
use quack::*;
#[derive(Copy, Clone, Debug)]
pub struct Tire {pub winter: bool}
#[derive(Debug, Clone)]
pub struct Car {pub tires: [Tire; 4]}
pub struct LeftFrontTire(pub Tire);
pub struct RightFrontTire(pub Tire);
pub struct LeftBackTire(pub Tire);
pub struct RightBackTire(pub Tire);
quack!{
for Car {
get_set LeftFrontTire(self.tires[0]),
get_set RightFrontTire(self.tires[1]),
get_set LeftBackTire(self.tires[2]),
get_set RightBackTire(self.tires[3]),
}
}
pub struct ShiftToWinterTires;
impl Action<ShiftToWinterTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToWinterTires) -> () {
for i in 0..4 {self.tires[i].winter = true}
}
}
pub struct ShiftToSummerTires;
impl Action<ShiftToSummerTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToSummerTires) -> () {
for i in 0..4 {self.tires[i].winter = false}
}
}
// Implement trait on top of duck type object.
pub trait GenericCar:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{
fn left_front_tire(&self) -> Tire {Get::<LeftFrontTire>::get(self).0}
fn
|
(&self) -> Tire {Get::<RightFrontTire>::get(self).0}
fn left_back_tire(&self) -> Tire {Get::<LeftBackTire>::get(self).0}
fn right_back_tire(&self) -> Tire {Get::<RightBackTire>::get(self).0}
fn set_left_front_tire(&mut self, val: Tire) {self.set(LeftFrontTire(val))}
fn set_right_front_tire(&mut self, val: Tire) {self.set(RightFrontTire(val))}
fn set_left_back_tire(&mut self, val: Tire) {self.set(LeftBackTire(val))}
fn set_right_back_tire(&mut self, val: Tire) {self.set(RightBackTire(val))}
fn shift_to_winter_tires(&mut self) {self.action(ShiftToWinterTires);}
fn shift_to_summer_tires(&mut self) {self.action(ShiftToSummerTires);}
}
// Auto implement `GenericCar`.
impl<T> GenericCar for T where T:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{}
fn main() {
let mut car = Car {tires: [Tire {winter: false}; 4]};
car.shift_to_winter_tires();
println!("{:?}", car);
car.set_left_front_tire(Tire {winter: false});
println!("Left front tire: {:?}", car.left_front_tire());
}
|
right_front_tire
|
identifier_name
|
car.rs
|
extern crate quack;
use quack::*;
#[derive(Copy, Clone, Debug)]
pub struct Tire {pub winter: bool}
#[derive(Debug, Clone)]
pub struct Car {pub tires: [Tire; 4]}
pub struct LeftFrontTire(pub Tire);
pub struct RightFrontTire(pub Tire);
pub struct LeftBackTire(pub Tire);
pub struct RightBackTire(pub Tire);
quack!{
for Car {
get_set LeftFrontTire(self.tires[0]),
get_set RightFrontTire(self.tires[1]),
get_set LeftBackTire(self.tires[2]),
get_set RightBackTire(self.tires[3]),
}
}
pub struct ShiftToWinterTires;
impl Action<ShiftToWinterTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToWinterTires) -> () {
for i in 0..4 {self.tires[i].winter = true}
}
}
pub struct ShiftToSummerTires;
impl Action<ShiftToSummerTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToSummerTires) -> ()
|
}
// Implement trait on top of duck type object.
pub trait GenericCar:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{
fn left_front_tire(&self) -> Tire {Get::<LeftFrontTire>::get(self).0}
fn right_front_tire(&self) -> Tire {Get::<RightFrontTire>::get(self).0}
fn left_back_tire(&self) -> Tire {Get::<LeftBackTire>::get(self).0}
fn right_back_tire(&self) -> Tire {Get::<RightBackTire>::get(self).0}
fn set_left_front_tire(&mut self, val: Tire) {self.set(LeftFrontTire(val))}
fn set_right_front_tire(&mut self, val: Tire) {self.set(RightFrontTire(val))}
fn set_left_back_tire(&mut self, val: Tire) {self.set(LeftBackTire(val))}
fn set_right_back_tire(&mut self, val: Tire) {self.set(RightBackTire(val))}
fn shift_to_winter_tires(&mut self) {self.action(ShiftToWinterTires);}
fn shift_to_summer_tires(&mut self) {self.action(ShiftToSummerTires);}
}
// Auto implement `GenericCar`.
impl<T> GenericCar for T where T:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{}
fn main() {
let mut car = Car {tires: [Tire {winter: false}; 4]};
car.shift_to_winter_tires();
println!("{:?}", car);
car.set_left_front_tire(Tire {winter: false});
println!("Left front tire: {:?}", car.left_front_tire());
}
|
{
for i in 0..4 {self.tires[i].winter = false}
}
|
identifier_body
|
car.rs
|
extern crate quack;
use quack::*;
#[derive(Copy, Clone, Debug)]
pub struct Tire {pub winter: bool}
#[derive(Debug, Clone)]
pub struct Car {pub tires: [Tire; 4]}
pub struct LeftFrontTire(pub Tire);
pub struct RightFrontTire(pub Tire);
pub struct LeftBackTire(pub Tire);
pub struct RightBackTire(pub Tire);
quack!{
for Car {
get_set LeftFrontTire(self.tires[0]),
get_set RightFrontTire(self.tires[1]),
get_set LeftBackTire(self.tires[2]),
get_set RightBackTire(self.tires[3]),
}
}
pub struct ShiftToWinterTires;
impl Action<ShiftToWinterTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToWinterTires) -> () {
for i in 0..4 {self.tires[i].winter = true}
}
}
pub struct ShiftToSummerTires;
impl Action<ShiftToSummerTires> for Car {
type Result = ();
fn action(&mut self, _: ShiftToSummerTires) -> () {
for i in 0..4 {self.tires[i].winter = false}
}
}
// Implement trait on top of duck type object.
pub trait GenericCar:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{
fn left_front_tire(&self) -> Tire {Get::<LeftFrontTire>::get(self).0}
fn right_front_tire(&self) -> Tire {Get::<RightFrontTire>::get(self).0}
fn left_back_tire(&self) -> Tire {Get::<LeftBackTire>::get(self).0}
fn right_back_tire(&self) -> Tire {Get::<RightBackTire>::get(self).0}
fn set_left_front_tire(&mut self, val: Tire) {self.set(LeftFrontTire(val))}
fn set_right_front_tire(&mut self, val: Tire) {self.set(RightFrontTire(val))}
fn set_left_back_tire(&mut self, val: Tire) {self.set(LeftBackTire(val))}
fn set_right_back_tire(&mut self, val: Tire) {self.set(RightBackTire(val))}
fn shift_to_winter_tires(&mut self) {self.action(ShiftToWinterTires);}
fn shift_to_summer_tires(&mut self) {self.action(ShiftToSummerTires);}
}
// Auto implement `GenericCar`.
impl<T> GenericCar for T where T:
GetSet<LeftFrontTire> +
GetSet<RightFrontTire> +
GetSet<LeftBackTire> +
GetSet<RightBackTire> +
Action<ShiftToSummerTires> +
Action<ShiftToWinterTires>
{}
fn main() {
let mut car = Car {tires: [Tire {winter: false}; 4]};
car.shift_to_winter_tires();
println!("{:?}", car);
|
car.set_left_front_tire(Tire {winter: false});
println!("Left front tire: {:?}", car.left_front_tire());
}
|
random_line_split
|
|
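car.rs leans on a blanket impl (impl<T> GenericCar for T where T: ...) so every type satisfying the quack bounds gets the high-level car methods for free. A dependency-free sketch of that same pattern, using simplified stand-in traits rather than the quack crate's actual Get/Set/Action machinery:
trait Winterize {
    fn set_all_winter(&mut self, on: bool);
}
trait SeasonalCar: Winterize {
    fn shift_to_winter_tires(&mut self) { self.set_all_winter(true); }
    fn shift_to_summer_tires(&mut self) { self.set_all_winter(false); }
}
// Counterpart of `impl<T> GenericCar for T where T: ...` above.
impl<T: Winterize> SeasonalCar for T {}
struct ToyCar { winter: [bool; 4] }
impl Winterize for ToyCar {
    fn set_all_winter(&mut self, on: bool) { self.winter = [on; 4]; }
}
fn main() {
    let mut car = ToyCar { winter: [false; 4] };
    car.shift_to_winter_tires();
    assert!(car.winter.iter().all(|&w| w));
}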
legacy.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
//! `<input size>`, and so forth.
use std::sync::Arc;
use selectors::tree::TNode;
use selectors::matching::DeclarationBlock;
use node::TElementAttributes;
use properties::PropertyDeclaration;
use selector_matching::Stylist;
use smallvec::VecLike;
/// Legacy presentational attributes that take a nonnegative integer as defined in HTML5 § 2.4.4.2.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum UnsignedIntegerAttribute {
/// `<td colspan>`
ColSpan,
}
/// Extension methods for `Stylist` that cause rules to be synthesized for legacy attributes.
pub trait PresentationalHintSynthesis {
/// Synthesizes rules from various HTML attributes (mostly legacy junk from HTML4) that confer
/// *presentational hints* as defined in the HTML5 specification. This handles stuff like
/// `<body bgcolor>`, `<input size>`, `<td width>`, and so forth.
///
/// NB: Beware! If you add an attribute to this list, be sure to add it to
/// `common_style_affecting_attributes` or `rare_style_affecting_attributes` as appropriate. If
/// you don't, you risk strange random nondeterministic failures due to false positives in
/// style sharing.
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>>;
}
impl PresentationalHintSynthesis for Stylist {
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>> {
let element = node.as_element();
let length = matching_rules_list.len();
element.synthesize_presentational_hints_for_legacy_attributes(matching_rules_list);
if matching_rules_list.len() != length {
// Never share style for elements with preshints
*shareable = false;
}
}
}
/// A convenience function to create a declaration block from a single declaration. This is
/// primarily used in `synthesize_rules_for_legacy_attributes`.
#[inline]
pub fn from_declaration(rule: PropertyDeclaration) -> DeclarationBlock<Vec<PropertyDeclaration>> {
DeclarationBlock::from_declarations(Arc::new(vec![rule]))
}
|
//! Legacy presentational attributes defined in the HTML5 specification: `<td width>`,
|
random_line_split
|
legacy.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Legacy presentational attributes defined in the HTML5 specification: `<td width>`,
//! `<input size>`, and so forth.
use std::sync::Arc;
use selectors::tree::TNode;
use selectors::matching::DeclarationBlock;
use node::TElementAttributes;
use properties::PropertyDeclaration;
use selector_matching::Stylist;
use smallvec::VecLike;
/// Legacy presentational attributes that take a nonnegative integer as defined in HTML5 § 2.4.4.2.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum UnsignedIntegerAttribute {
/// `<td colspan>`
ColSpan,
}
/// Extension methods for `Stylist` that cause rules to be synthesized for legacy attributes.
pub trait PresentationalHintSynthesis {
/// Synthesizes rules from various HTML attributes (mostly legacy junk from HTML4) that confer
/// *presentational hints* as defined in the HTML5 specification. This handles stuff like
/// `<body bgcolor>`, `<input size>`, `<td width>`, and so forth.
///
/// NB: Beware! If you add an attribute to this list, be sure to add it to
/// `common_style_affecting_attributes` or `rare_style_affecting_attributes` as appropriate. If
/// you don't, you risk strange random nondeterministic failures due to false positives in
/// style sharing.
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>>;
}
impl PresentationalHintSynthesis for Stylist {
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>> {
|
}
/// A convenience function to create a declaration block from a single declaration. This is
/// primarily used in `synthesize_rules_for_legacy_attributes`.
#[inline]
pub fn from_declaration(rule: PropertyDeclaration) -> DeclarationBlock<Vec<PropertyDeclaration>> {
DeclarationBlock::from_declarations(Arc::new(vec![rule]))
}
|
let element = node.as_element();
let length = matching_rules_list.len();
element.synthesize_presentational_hints_for_legacy_attributes(matching_rules_list);
if matching_rules_list.len() != length {
// Never share style for elements with preshints
*shareable = false;
}
}
|
identifier_body
|
legacy.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Legacy presentational attributes defined in the HTML5 specification: `<td width>`,
//! `<input size>`, and so forth.
use std::sync::Arc;
use selectors::tree::TNode;
use selectors::matching::DeclarationBlock;
use node::TElementAttributes;
use properties::PropertyDeclaration;
use selector_matching::Stylist;
use smallvec::VecLike;
/// Legacy presentational attributes that take a nonnegative integer as defined in HTML5 § 2.4.4.2.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum UnsignedIntegerAttribute {
/// `<td colspan>`
ColSpan,
}
/// Extension methods for `Stylist` that cause rules to be synthesized for legacy attributes.
pub trait PresentationalHintSynthesis {
/// Synthesizes rules from various HTML attributes (mostly legacy junk from HTML4) that confer
/// *presentational hints* as defined in the HTML5 specification. This handles stuff like
/// `<body bgcolor>`, `<input size>`, `<td width>`, and so forth.
///
/// NB: Beware! If you add an attribute to this list, be sure to add it to
/// `common_style_affecting_attributes` or `rare_style_affecting_attributes` as appropriate. If
/// you don't, you risk strange random nondeterministic failures due to false positives in
/// style sharing.
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>>;
}
impl PresentationalHintSynthesis for Stylist {
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>> {
let element = node.as_element();
let length = matching_rules_list.len();
element.synthesize_presentational_hints_for_legacy_attributes(matching_rules_list);
if matching_rules_list.len() != length {
// Never share style for elements with preshints
*shareable = false;
}
}
}
/// A convenience function to create a declaration block from a single declaration. This is
/// primarily used in `synthesize_rules_for_legacy_attributes`.
#[inline]
pub fn f
|
rule: PropertyDeclaration) -> DeclarationBlock<Vec<PropertyDeclaration>> {
DeclarationBlock::from_declarations(Arc::new(vec![rule]))
}
|
rom_declaration(
|
identifier_name
|
legacy.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Legacy presentational attributes defined in the HTML5 specification: `<td width>`,
//! `<input size>`, and so forth.
use std::sync::Arc;
use selectors::tree::TNode;
use selectors::matching::DeclarationBlock;
use node::TElementAttributes;
use properties::PropertyDeclaration;
use selector_matching::Stylist;
use smallvec::VecLike;
/// Legacy presentational attributes that take a nonnegative integer as defined in HTML5 § 2.4.4.2.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum UnsignedIntegerAttribute {
/// `<td colspan>`
ColSpan,
}
/// Extension methods for `Stylist` that cause rules to be synthesized for legacy attributes.
pub trait PresentationalHintSynthesis {
/// Synthesizes rules from various HTML attributes (mostly legacy junk from HTML4) that confer
/// *presentational hints* as defined in the HTML5 specification. This handles stuff like
/// `<body bgcolor>`, `<input size>`, `<td width>`, and so forth.
///
/// NB: Beware! If you add an attribute to this list, be sure to add it to
/// `common_style_affecting_attributes` or `rare_style_affecting_attributes` as appropriate. If
/// you don't, you risk strange random nondeterministic failures due to false positives in
/// style sharing.
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>>;
}
impl PresentationalHintSynthesis for Stylist {
fn synthesize_presentational_hints_for_legacy_attributes<'a,N,V>(
&self, node: &N, matching_rules_list: &mut V, shareable: &mut bool)
where N: TNode<'a>,
N::Element: TElementAttributes<'a>,
V: VecLike<DeclarationBlock<Vec<PropertyDeclaration>>> {
let element = node.as_element();
let length = matching_rules_list.len();
element.synthesize_presentational_hints_for_legacy_attributes(matching_rules_list);
if matching_rules_list.len() != length {
|
}
}
/// A convenience function to create a declaration block from a single declaration. This is
/// primarily used in `synthesize_rules_for_legacy_attributes`.
#[inline]
pub fn from_declaration(rule: PropertyDeclaration) -> DeclarationBlock<Vec<PropertyDeclaration>> {
DeclarationBlock::from_declarations(Arc::new(vec![rule]))
}
|
// Never share style for elements with preshints
*shareable = false;
}
|
conditional_block
|
watcher.rs
|
// Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// A trivial interface for extracting information from a transact as it happens.
// We have two situations in which we need to do this:
//
// - InProgress and Conn both have attribute caches. InProgress's is different from Conn's,
// because it needs to be able to roll back. These wish to see changes in a certain set of
// attributes in order to synchronously update the cache during a write.
// - When observers are registered we want to flip some flags as writes occur so that we can
// notify them outside the transaction.
use core_traits::{
Entid,
TypedValue,
};
use mentat_core::{
Schema,
};
use edn::entities::{
OpType,
};
use db_traits::errors::{
Result,
};
pub trait TransactWatcher {
fn datom(&mut self, op: OpType, e: Entid, a: Entid, v: &TypedValue);
/// Only return an error if you want to interrupt the transact!
/// Called with the schema _prior to_ the transact -- any attributes or
/// attribute changes transacted during this transact are not reflected in
/// the schema.
fn done(&mut self, t: &Entid, schema: &Schema) -> Result<()>;
}
pub struct NullWatcher();
impl TransactWatcher for NullWatcher {
fn datom(&mut self, _op: OpType, _e: Entid, _a: Entid, _v: &TypedValue) {
}
fn
|
(&mut self, _t: &Entid, _schema: &Schema) -> Result<()> {
Ok(())
}
}
|
done
|
identifier_name
|
watcher.rs
|
// Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// A trivial interface for extracting information from a transact as it happens.
// We have two situations in which we need to do this:
//
// - InProgress and Conn both have attribute caches. InProgress's is different from Conn's,
// because it needs to be able to roll back. These wish to see changes in a certain set of
// attributes in order to synchronously update the cache during a write.
// - When observers are registered we want to flip some flags as writes occur so that we can
// notify them outside the transaction.
use core_traits::{
Entid,
TypedValue,
};
use mentat_core::{
Schema,
};
use edn::entities::{
OpType,
};
|
pub trait TransactWatcher {
fn datom(&mut self, op: OpType, e: Entid, a: Entid, v: &TypedValue);
/// Only return an error if you want to interrupt the transact!
/// Called with the schema _prior to_ the transact -- any attributes or
/// attribute changes transacted during this transact are not reflected in
/// the schema.
fn done(&mut self, t: &Entid, schema: &Schema) -> Result<()>;
}
pub struct NullWatcher();
impl TransactWatcher for NullWatcher {
fn datom(&mut self, _op: OpType, _e: Entid, _a: Entid, _v: &TypedValue) {
}
fn done(&mut self, _t: &Entid, _schema: &Schema) -> Result<()> {
Ok(())
}
}
|
use db_traits::errors::{
Result,
};
|
random_line_split
|
watcher.rs
|
// Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// A trivial interface for extracting information from a transact as it happens.
// We have two situations in which we need to do this:
//
// - InProgress and Conn both have attribute caches. InProgress's is different from Conn's,
// because it needs to be able to roll back. These wish to see changes in a certain set of
// attributes in order to synchronously update the cache during a write.
// - When observers are registered we want to flip some flags as writes occur so that we can
// notify them outside the transaction.
use core_traits::{
Entid,
TypedValue,
};
use mentat_core::{
Schema,
};
use edn::entities::{
OpType,
};
use db_traits::errors::{
Result,
};
pub trait TransactWatcher {
fn datom(&mut self, op: OpType, e: Entid, a: Entid, v: &TypedValue);
/// Only return an error if you want to interrupt the transact!
/// Called with the schema _prior to_ the transact -- any attributes or
/// attribute changes transacted during this transact are not reflected in
/// the schema.
fn done(&mut self, t: &Entid, schema: &Schema) -> Result<()>;
}
pub struct NullWatcher();
impl TransactWatcher for NullWatcher {
fn datom(&mut self, _op: OpType, _e: Entid, _a: Entid, _v: &TypedValue) {
}
fn done(&mut self, _t: &Entid, _schema: &Schema) -> Result<()>
|
}
|
{
Ok(())
}
|
identifier_body
|
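NullWatcher above ignores every datom; a sketch of a watcher that actually observes something, assuming the same imports as the module and the usual Add/Retract variants on OpType. It only counts assertions and retractions and never interrupts the transact:
pub struct CountingWatcher {
    pub added: usize,
    pub retracted: usize,
}
impl TransactWatcher for CountingWatcher {
    fn datom(&mut self, op: OpType, _e: Entid, _a: Entid, _v: &TypedValue) {
        match op {
            OpType::Add => self.added += 1,
            OpType::Retract => self.retracted += 1,
        }
    }
    fn done(&mut self, _t: &Entid, _schema: &Schema) -> Result<()> {
        Ok(())
    }
}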
llp.rs
|
#[doc = "Register `LLP` reader"]
pub struct R(crate::R<LLP_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<LLP_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<LLP_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `LLP` writer"]
pub struct W(crate::W<LLP_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<LLP_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<LLP_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `LOC` reader - Starting Address In Memory"]
pub struct LOC_R(crate::FieldReader<u32, u32>);
impl LOC_R {
pub(crate) fn new(bits: u32) -> Self {
LOC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for LOC_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `LOC` writer - Starting Address In Memory"]
pub struct LOC_W<'a> {
w: &'a mut W,
}
impl<'a> LOC_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x3fff_ffff << 2)) | ((value as u32 & 0x3fff_ffff) << 2);
self.w
}
}
impl R {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&self) -> LOC_R {
LOC_R::new(((self.bits >> 2) & 0x3fff_ffff) as u32)
}
}
impl W {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&mut self) -> LOC_W {
LOC_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Linked List Pointer Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [llp](index.html) module"]
pub struct
|
;
impl crate::RegisterSpec for LLP_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [llp::R](R) reader structure"]
impl crate::Readable for LLP_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [llp::W](W) writer structure"]
impl crate::Writable for LLP_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets LLP to value 0"]
impl crate::Resettable for LLP_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
LLP_SPEC
|
identifier_name
|
llp.rs
|
#[doc = "Register `LLP` reader"]
pub struct R(crate::R<LLP_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<LLP_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<LLP_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `LLP` writer"]
pub struct W(crate::W<LLP_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<LLP_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<LLP_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `LOC` reader - Starting Address In Memory"]
pub struct LOC_R(crate::FieldReader<u32, u32>);
impl LOC_R {
pub(crate) fn new(bits: u32) -> Self {
LOC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for LOC_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `LOC` writer - Starting Address In Memory"]
pub struct LOC_W<'a> {
w: &'a mut W,
}
impl<'a> LOC_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W
|
}
impl R {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&self) -> LOC_R {
LOC_R::new(((self.bits >> 2) & 0x3fff_ffff) as u32)
}
}
impl W {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&mut self) -> LOC_W {
LOC_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Linked List Pointer Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [llp](index.html) module"]
pub struct LLP_SPEC;
impl crate::RegisterSpec for LLP_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [llp::R](R) reader structure"]
impl crate::Readable for LLP_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [llp::W](W) writer structure"]
impl crate::Writable for LLP_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets LLP to value 0"]
impl crate::Resettable for LLP_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
{
self.w.bits = (self.w.bits & !(0x3fff_ffff << 2)) | ((value as u32 & 0x3fff_ffff) << 2);
self.w
}
|
identifier_body
|
llp.rs
|
#[doc = "Register `LLP` reader"]
pub struct R(crate::R<LLP_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<LLP_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<LLP_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `LLP` writer"]
pub struct W(crate::W<LLP_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<LLP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<LLP_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<LLP_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `LOC` reader - Starting Address In Memory"]
pub struct LOC_R(crate::FieldReader<u32, u32>);
impl LOC_R {
pub(crate) fn new(bits: u32) -> Self {
LOC_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for LOC_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `LOC` writer - Starting Address In Memory"]
pub struct LOC_W<'a> {
w: &'a mut W,
}
impl<'a> LOC_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x3fff_ffff << 2)) | ((value as u32 & 0x3fff_ffff) << 2);
self.w
}
}
impl R {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&self) -> LOC_R {
LOC_R::new(((self.bits >> 2) & 0x3fff_ffff) as u32)
}
}
impl W {
#[doc = "Bits 2:31 - Starting Address In Memory"]
#[inline(always)]
pub fn loc(&mut self) -> LOC_W {
LOC_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Linked List Pointer Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [llp](index.html) module"]
pub struct LLP_SPEC;
impl crate::RegisterSpec for LLP_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [llp::R](R) reader structure"]
impl crate::Readable for LLP_SPEC {
type Reader = R;
}
#[doc = "`write(|w|..)` method takes [llp::W](W) writer structure"]
impl crate::Writable for LLP_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets LLP to value 0"]
impl crate::Resettable for LLP_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
|
}
|
}
|
random_line_split
|
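The llp.rs records are svd2rust output, so LLP is driven through the generated read/write/modify API listed in its doc comment. The snippet below is only a usage sketch: the pac crate name, the DMA peripheral handle, and the lowercase llp field are assumptions, while the LOC accessors and the bits 2..31 placement come straight from the records above.

// Hypothetical usage of the generated LLP register API.
fn point_llp_at(dma: &pac::DMA, next_descriptor: u32) {
    // LOC holds bits 2..31, so the word-aligned byte address is shifted right by 2.
    dma.llp.write(|w| unsafe { w.loc().bits(next_descriptor >> 2) });

    // Reading the field back returns the stored (shifted) value.
    let stored = dma.llp.read().loc().bits();
    assert_eq!(stored, next_descriptor >> 2);
}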
bsp_renderer.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/bsp_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only in development for testing
the loading and rendering of Quake's
BSP maps.
*/
use super::{ State, Game_Renderer };
use std::mem;
use gl2 = opengles::gl2;
use gfx;
use glfw;
use ui;
use math;
use obj::bsp;
use log::Log;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
pub struct BSP_Renderer
{
game_renderer: @mut Game_Renderer,
vao: gl2::GLuint,
vbo: gl2::GLuint,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
}
impl BSP_Renderer
{
pub fn new(game_renderer: @mut Game_Renderer) -> @mut BSP_Renderer
{
let gr = @mut BSP_Renderer
{
game_renderer: game_renderer,
vao: 0,
vbo: 0,
shader: gfx::Shader_Builder::new_with_files("data/shaders/color.vert", "data/shaders/color.frag"),
proj_loc: 0,
world_loc: 0,
};
gr.upload();
gr
}
fn upload(&mut self)
{
let name = check!(gl2::gen_vertex_arrays(1));
log_assert!(name.len() == 1);
self.vao = name[0];
let name = check!(gl2::gen_buffers(1));
log_assert!(name.len() == 1);
self.vbo = name[0];
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, self.game_renderer.game.bsp_map.verts, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false,
mem::size_of::<bsp::lump::Vertex>() as i32,
0));
check!(gl2::vertex_attrib_pointer_u8(1, 4, true,
mem::size_of::<bsp::lump::Vertex>() as i32,
mem::size_of::<bsp::lump::Vertex>() as u32 -
mem::size_of::<math::Vec4u8>() as u32));
}
fn render_mesh(&self)
{
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::draw_arrays(gl2::TRIANGLES, 0, self.game_renderer.game.bsp_map.verts.len() as i32));
check!(gl2::bind_vertex_array(0));
}
}
impl State for BSP_Renderer
{
fn load(&mut self)
{
log_debug!("Loading bsp renderer state.");
self.game_renderer.camera.show_fps = true;
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
}
fn unload(&mut self)
{
log_debug!("Unloading bsp renderer state.");
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vbo]));
}
fn get_key(&self) -> &str
{ &"bsp_renderer" }
fn update(&mut self, delta: f32) -> bool /* dt is in terms of seconds. */
{
self.game_renderer.camera.update(delta);
false
}
fn render(&mut self) -> bool
{
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &self.game_renderer.camera.projection);
self.shader.update_uniform_mat(self.world_loc, &self.game_renderer.camera.view);
self.render_mesh();
let fps = self.game_renderer.camera.frame_rate;
let ui_renderer = ui::Renderer::get();
ui_renderer.begin();
{
if self.game_renderer.camera.show_fps
|
} ui_renderer.end();
false
}
fn key_action(&mut self, key: glfw::Key, action: glfw::Action, _mods: glfw::Modifiers) -> bool
{ (self.game_renderer.camera as @mut State).key_action(key, action, _mods) }
fn mouse_moved(&mut self, x: f32, y: f32) -> bool
{ (self.game_renderer.camera as @mut State).mouse_moved(x, y) }
}
|
{
ui_renderer.render_font(
format!("{}", fps),
math::Vec2f::new(self.game_renderer.camera.window_size.x as f32 - 40.0, 0.0),
&self.game_renderer.fps_font);
}
|
conditional_block
|
bsp_renderer.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/bsp_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only in development for testing
the loading and rendering of Quake's
BSP maps.
*/
use super::{ State, Game_Renderer };
use std::mem;
use gl2 = opengles::gl2;
use gfx;
use glfw;
use ui;
use math;
use obj::bsp;
use log::Log;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
pub struct BSP_Renderer
{
game_renderer: @mut Game_Renderer,
vao: gl2::GLuint,
vbo: gl2::GLuint,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
}
impl BSP_Renderer
{
pub fn new(game_renderer: @mut Game_Renderer) -> @mut BSP_Renderer
{
let gr = @mut BSP_Renderer
{
game_renderer: game_renderer,
vao: 0,
vbo: 0,
shader: gfx::Shader_Builder::new_with_files("data/shaders/color.vert", "data/shaders/color.frag"),
proj_loc: 0,
world_loc: 0,
};
gr.upload();
gr
}
fn upload(&mut self)
{
let name = check!(gl2::gen_vertex_arrays(1));
log_assert!(name.len() == 1);
self.vao = name[0];
let name = check!(gl2::gen_buffers(1));
log_assert!(name.len() == 1);
self.vbo = name[0];
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, self.game_renderer.game.bsp_map.verts, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false,
mem::size_of::<bsp::lump::Vertex>() as i32,
0));
check!(gl2::vertex_attrib_pointer_u8(1, 4, true,
mem::size_of::<bsp::lump::Vertex>() as i32,
mem::size_of::<bsp::lump::Vertex>() as u32 -
mem::size_of::<math::Vec4u8>() as u32));
}
fn render_mesh(&self)
{
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::draw_arrays(gl2::TRIANGLES, 0, self.game_renderer.game.bsp_map.verts.len() as i32));
check!(gl2::bind_vertex_array(0));
}
}
|
log_debug!("Loading bsp renderer state.");
self.game_renderer.camera.show_fps = true;
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
}
fn unload(&mut self)
{
log_debug!("Unloading bsp renderer state.");
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vbo]));
}
fn get_key(&self) -> &str
{ &"bsp_renderer" }
fn update(&mut self, delta: f32) -> bool /* dt is in terms of seconds. */
{
self.game_renderer.camera.update(delta);
false
}
fn render(&mut self) -> bool
{
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &self.game_renderer.camera.projection);
self.shader.update_uniform_mat(self.world_loc, &self.game_renderer.camera.view);
self.render_mesh();
let fps = self.game_renderer.camera.frame_rate;
let ui_renderer = ui::Renderer::get();
ui_renderer.begin();
{
if self.game_renderer.camera.show_fps
{
ui_renderer.render_font(
format!("{}", fps),
math::Vec2f::new(self.game_renderer.camera.window_size.x as f32 - 40.0, 0.0),
&self.game_renderer.fps_font);
}
} ui_renderer.end();
false
}
fn key_action(&mut self, key: glfw::Key, action: glfw::Action, _mods: glfw::Modifiers) -> bool
{ (self.game_renderer.camera as @mut State).key_action(key, action, _mods) }
fn mouse_moved(&mut self, x: f32, y: f32) -> bool
{ (self.game_renderer.camera as @mut State).mouse_moved(x, y) }
}
|
impl State for BSP_Renderer
{
fn load(&mut self)
{
|
random_line_split
|
bsp_renderer.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/bsp_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only in development for testing
the loading and rendering of Quake's
BSP maps.
*/
use super::{ State, Game_Renderer };
use std::mem;
use gl2 = opengles::gl2;
use gfx;
use glfw;
use ui;
use math;
use obj::bsp;
use log::Log;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
pub struct BSP_Renderer
{
game_renderer: @mut Game_Renderer,
vao: gl2::GLuint,
vbo: gl2::GLuint,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
}
impl BSP_Renderer
{
pub fn new(game_renderer: @mut Game_Renderer) -> @mut BSP_Renderer
{
let gr = @mut BSP_Renderer
{
game_renderer: game_renderer,
vao: 0,
vbo: 0,
shader: gfx::Shader_Builder::new_with_files("data/shaders/color.vert", "data/shaders/color.frag"),
proj_loc: 0,
world_loc: 0,
};
gr.upload();
gr
}
fn upload(&mut self)
{
let name = check!(gl2::gen_vertex_arrays(1));
log_assert!(name.len() == 1);
self.vao = name[0];
let name = check!(gl2::gen_buffers(1));
log_assert!(name.len() == 1);
self.vbo = name[0];
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, self.game_renderer.game.bsp_map.verts, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false,
mem::size_of::<bsp::lump::Vertex>() as i32,
0));
check!(gl2::vertex_attrib_pointer_u8(1, 4, true,
mem::size_of::<bsp::lump::Vertex>() as i32,
mem::size_of::<bsp::lump::Vertex>() as u32 -
mem::size_of::<math::Vec4u8>() as u32));
}
fn render_mesh(&self)
{
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::draw_arrays(gl2::TRIANGLES, 0, self.game_renderer.game.bsp_map.verts.len() as i32));
check!(gl2::bind_vertex_array(0));
}
}
impl State for BSP_Renderer
{
fn load(&mut self)
{
log_debug!("Loading bsp renderer state.");
self.game_renderer.camera.show_fps = true;
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
}
fn unload(&mut self)
{
log_debug!("Unloading bsp renderer state.");
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vbo]));
}
fn get_key(&self) -> &str
{ &"bsp_renderer" }
fn update(&mut self, delta: f32) -> bool /* dt is in terms of seconds. */
{
self.game_renderer.camera.update(delta);
false
}
fn render(&mut self) -> bool
{
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &self.game_renderer.camera.projection);
self.shader.update_uniform_mat(self.world_loc, &self.game_renderer.camera.view);
self.render_mesh();
let fps = self.game_renderer.camera.frame_rate;
let ui_renderer = ui::Renderer::get();
ui_renderer.begin();
{
if self.game_renderer.camera.show_fps
{
ui_renderer.render_font(
format!("{}", fps),
math::Vec2f::new(self.game_renderer.camera.window_size.x as f32 - 40.0, 0.0),
&self.game_renderer.fps_font);
}
} ui_renderer.end();
false
}
fn key_action(&mut self, key: glfw::Key, action: glfw::Action, _mods: glfw::Modifiers) -> bool
{ (self.game_renderer.camera as @mut State).key_action(key, action, _mods) }
fn mouse_moved(&mut self, x: f32, y: f32) -> bool
|
}
|
{ (self.game_renderer.camera as @mut State).mouse_moved(x, y) }
|
identifier_body
|
bsp_renderer.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/bsp_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only in development for testing
the loading and rendering of Quake's
BSP maps.
*/
use super::{ State, Game_Renderer };
use std::mem;
use gl2 = opengles::gl2;
use gfx;
use glfw;
use ui;
use math;
use obj::bsp;
use log::Log;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
pub struct BSP_Renderer
{
game_renderer: @mut Game_Renderer,
vao: gl2::GLuint,
vbo: gl2::GLuint,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
}
impl BSP_Renderer
{
pub fn new(game_renderer: @mut Game_Renderer) -> @mut BSP_Renderer
{
let gr = @mut BSP_Renderer
{
game_renderer: game_renderer,
vao: 0,
vbo: 0,
shader: gfx::Shader_Builder::new_with_files("data/shaders/color.vert", "data/shaders/color.frag"),
proj_loc: 0,
world_loc: 0,
};
gr.upload();
gr
}
fn upload(&mut self)
{
let name = check!(gl2::gen_vertex_arrays(1));
log_assert!(name.len() == 1);
self.vao = name[0];
let name = check!(gl2::gen_buffers(1));
log_assert!(name.len() == 1);
self.vbo = name[0];
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, self.game_renderer.game.bsp_map.verts, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false,
mem::size_of::<bsp::lump::Vertex>() as i32,
0));
check!(gl2::vertex_attrib_pointer_u8(1, 4, true,
mem::size_of::<bsp::lump::Vertex>() as i32,
mem::size_of::<bsp::lump::Vertex>() as u32 -
mem::size_of::<math::Vec4u8>() as u32));
}
fn render_mesh(&self)
{
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::draw_arrays(gl2::TRIANGLES, 0, self.game_renderer.game.bsp_map.verts.len() as i32));
check!(gl2::bind_vertex_array(0));
}
}
impl State for BSP_Renderer
{
fn load(&mut self)
{
log_debug!("Loading bsp renderer state.");
self.game_renderer.camera.show_fps = true;
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
}
fn
|
(&mut self)
{
log_debug!("Unloading bsp renderer state.");
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vbo]));
}
fn get_key(&self) -> &str
{ &"bsp_renderer" }
fn update(&mut self, delta: f32) -> bool /* dt is in terms of seconds. */
{
self.game_renderer.camera.update(delta);
false
}
fn render(&mut self) -> bool
{
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &self.game_renderer.camera.projection);
self.shader.update_uniform_mat(self.world_loc, &self.game_renderer.camera.view);
self.render_mesh();
let fps = self.game_renderer.camera.frame_rate;
let ui_renderer = ui::Renderer::get();
ui_renderer.begin();
{
if self.game_renderer.camera.show_fps
{
ui_renderer.render_font(
format!("{}", fps),
math::Vec2f::new(self.game_renderer.camera.window_size.x as f32 - 40.0, 0.0),
&self.game_renderer.fps_font);
}
} ui_renderer.end();
false
}
fn key_action(&mut self, key: glfw::Key, action: glfw::Action, _mods: glfw::Modifiers) -> bool
{ (self.game_renderer.camera as @mut State).key_action(key, action, _mods) }
fn mouse_moved(&mut self, x: f32, y: f32) -> bool
{ (self.game_renderer.camera as @mut State).mouse_moved(x, y) }
}
|
unload
|
identifier_name
|
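upload() in the bsp_renderer.rs records derives its attribute layout from struct sizes: the stride is size_of::<bsp::lump::Vertex>() and the colour offset is that stride minus size_of::<math::Vec4u8>(). The standalone sketch below checks that arithmetic with stand-in types; the field layout of the real Vertex (a float position plus an RGBA byte colour) is an assumption, not taken from these records.

use std::mem;

// Stand-in types; the real bsp::lump::Vertex and math::Vec4u8 are not shown above.
#[repr(C)]
struct Vec4u8 { r: u8, g: u8, b: u8, a: u8 }

#[repr(C)]
struct Vertex { position: [f32; 3], color: Vec4u8 }

fn main() {
    let stride = mem::size_of::<Vertex>() as i32;
    let color_offset = mem::size_of::<Vertex>() as u32 - mem::size_of::<Vec4u8>() as u32;

    assert_eq!(stride, 16);       // 12 bytes of position + 4 bytes of colour
    assert_eq!(color_offset, 12); // colour sits at the end of each vertex
}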
map2one.rs
|
use itertools::izip;
use mli::{Backward, Forward, Train};
use ndarray::{Array, Array2};
use num_traits::Zero;
use std::ops::Add;
#[derive(Clone, Debug)]
pub struct Map2One<G>(pub G);
impl<G> Forward for Map2One<G>
where
G: Forward,
{
type Input = Array2<G::Input>;
type Internal = Array2<G::Internal>;
type Output = Array2<G::Output>;
fn forward(&self, input: &Self::Input) -> (Self::Internal, Self::Output) {
let both_vec: Vec<(G::Internal, G::Output)> =
input.iter().map(|input| self.0.forward(input)).collect();
let (internal_vec, output_vec) = both_vec.into_iter().fold(
(vec![], vec![]),
|(mut internal_vec, mut output_vec), (internal, output)| {
internal_vec.push(internal);
output_vec.push(output);
(internal_vec, output_vec)
},
);
let internal_array = Array::from_shape_vec(input.raw_dim(), internal_vec).unwrap();
let output_array = Array::from_shape_vec(input.raw_dim(), output_vec).unwrap();
(internal_array, output_array)
}
}
impl<G> Backward for Map2One<G>
where
G: Backward,
G::TrainDelta: Clone + Add + Zero,
{
type OutputDelta = Array2<G::OutputDelta>;
type InputDelta = Array2<G::InputDelta>;
type TrainDelta = G::TrainDelta;
fn
|
(
&self,
input: &Self::Input,
internal: &Self::Internal,
output_delta: &Self::OutputDelta,
) -> (Self::InputDelta, Self::TrainDelta) {
let both_vec: Vec<(G::InputDelta, G::TrainDelta)> =
izip!(input.iter(), internal.iter(), output_delta.iter(),)
.map(|(input, internal, output_delta)| {
self.0.backward(input, internal, output_delta)
})
.collect();
let (input_delta_vec, train_delta_vec) = both_vec.into_iter().fold(
(vec![], vec![]),
|(mut input_delta_vec, mut train_delta_vec), (input_delta, train_delta)| {
input_delta_vec.push(input_delta);
train_delta_vec.push(train_delta);
(input_delta_vec, train_delta_vec)
},
);
let input_delta_array = Array::from_shape_vec(input.raw_dim(), input_delta_vec).unwrap();
let train_delta_array = Array::from_shape_vec(input.raw_dim(), train_delta_vec).unwrap();
(input_delta_array, train_delta_array.sum())
}
}
impl<G> Train for Map2One<G>
where
G: Train,
G::TrainDelta: Clone + Add + Zero,
{
fn train(&mut self, train_delta: &Self::TrainDelta) {
self.0.train(&train_delta);
}
}
|
backward
|
identifier_name
|
map2one.rs
|
use itertools::izip;
use mli::{Backward, Forward, Train};
use ndarray::{Array, Array2};
use num_traits::Zero;
use std::ops::Add;
#[derive(Clone, Debug)]
pub struct Map2One<G>(pub G);
impl<G> Forward for Map2One<G>
where
G: Forward,
{
type Input = Array2<G::Input>;
type Internal = Array2<G::Internal>;
type Output = Array2<G::Output>;
fn forward(&self, input: &Self::Input) -> (Self::Internal, Self::Output) {
let both_vec: Vec<(G::Internal, G::Output)> =
input.iter().map(|input| self.0.forward(input)).collect();
let (internal_vec, output_vec) = both_vec.into_iter().fold(
(vec![], vec![]),
|(mut internal_vec, mut output_vec), (internal, output)| {
internal_vec.push(internal);
output_vec.push(output);
(internal_vec, output_vec)
},
);
let internal_array = Array::from_shape_vec(input.raw_dim(), internal_vec).unwrap();
let output_array = Array::from_shape_vec(input.raw_dim(), output_vec).unwrap();
(internal_array, output_array)
}
}
impl<G> Backward for Map2One<G>
where
G: Backward,
G::TrainDelta: Clone + Add + Zero,
{
|
fn backward(
&self,
input: &Self::Input,
internal: &Self::Internal,
output_delta: &Self::OutputDelta,
) -> (Self::InputDelta, Self::TrainDelta) {
let both_vec: Vec<(G::InputDelta, G::TrainDelta)> =
izip!(input.iter(), internal.iter(), output_delta.iter(),)
.map(|(input, internal, output_delta)| {
self.0.backward(input, internal, output_delta)
})
.collect();
let (input_delta_vec, train_delta_vec) = both_vec.into_iter().fold(
(vec![], vec![]),
|(mut input_delta_vec, mut train_delta_vec), (input_delta, train_delta)| {
input_delta_vec.push(input_delta);
train_delta_vec.push(train_delta);
(input_delta_vec, train_delta_vec)
},
);
let input_delta_array = Array::from_shape_vec(input.raw_dim(), input_delta_vec).unwrap();
let train_delta_array = Array::from_shape_vec(input.raw_dim(), train_delta_vec).unwrap();
(input_delta_array, train_delta_array.sum())
}
}
impl<G> Train for Map2One<G>
where
G: Train,
G::TrainDelta: Clone + Add + Zero,
{
fn train(&mut self, train_delta: &Self::TrainDelta) {
self.0.train(&train_delta);
}
}
|
type OutputDelta = Array2<G::OutputDelta>;
type InputDelta = Array2<G::InputDelta>;
type TrainDelta = G::TrainDelta;
|
random_line_split
|
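Map2One lifts a per-element op over an Array2 by running the wrapped Forward/Backward implementation on every element. The sketch below is a hypothetical element-wise op it could wrap; the Forward signature is copied from the records above, but the ReLU op itself is an illustration, not part of the original crate.

use mli::Forward;

/// Hypothetical element-wise op: ReLU over a single f32.
#[derive(Clone, Debug)]
struct Relu;

impl Forward for Relu {
    type Input = f32;
    type Internal = ();
    type Output = f32;

    fn forward(&self, input: &Self::Input) -> (Self::Internal, Self::Output) {
        // No internal state is needed; the output is max(0, x).
        ((), input.max(0.0))
    }
}

// Map2One(Relu).forward(&arr) would then yield an Array2<f32> of the same
// shape, with each element passed through Relu::forward.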
issue-42234-unknown-receiver-type.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// When the type of a method call's receiver is unknown, the span should point
// to the receiver (and not the entire call, as was previously the case before
// the fix that this tests).
fn shines_a_beacon_through_the_darkness() {
let x: Option<_> = None;
x.unwrap().method_that_could_exist_on_some_type();
//~^ ERROR 17:5: 17:15: type annotations needed
}
fn courier_to_des_moines_and_points_west(data: &[u32]) -> String {
data.iter() //~ ERROR 22:5: 23:20: type annotations needed
.sum::<_>()
.to_string()
}
|
fn main() {}
|
random_line_split
|
|
issue-42234-unknown-receiver-type.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// When the type of a method call's receiver is unknown, the span should point
// to the receiver (and not the entire call, as was previously the case before
// the fix that this tests).
fn shines_a_beacon_through_the_darkness() {
let x: Option<_> = None;
x.unwrap().method_that_could_exist_on_some_type();
//~^ ERROR 17:5: 17:15: type annotations needed
}
fn courier_to_des_moines_and_points_west(data: &[u32]) -> String {
data.iter() //~ ERROR 22:5: 23:20: type annotations needed
.sum::<_>()
.to_string()
}
fn
|
() {}
|
main
|
identifier_name
|
issue-42234-unknown-receiver-type.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// When the type of a method call's receiver is unknown, the span should point
// to the receiver (and not the entire call, as was previously the case before
// the fix that this tests).
fn shines_a_beacon_through_the_darkness() {
let x: Option<_> = None;
x.unwrap().method_that_could_exist_on_some_type();
//~^ ERROR 17:5: 17:15: type annotations needed
}
fn courier_to_des_moines_and_points_west(data: &[u32]) -> String
|
fn main() {}
|
{
data.iter() //~ ERROR 22:5: 23:20: type annotations needed
.sum::<_>()
.to_string()
}
|
identifier_body
|
lib.rs
|
#![feature(abi_x86_interrupt)]
#![feature(asm)]
#![feature(const_fn, unique)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
#![feature(alloc, collections)]
#![feature(core_intrinsics)]
extern crate bit_field;
#[macro_use]
extern crate bitflags;
extern crate raw_cpuid;
#[macro_use]
extern crate lazy_static;
extern crate multiboot2;
extern crate rlibc;
extern crate spin;
extern crate volatile;
extern crate x86_64;
extern crate hole_list_allocator;
extern crate alloc;
extern crate collections;
#[macro_use]
extern crate once;
// Make constants public
pub use consts::*;
#[macro_use]
/// Console handling
pub mod vga_buffer;
/// Kernel message writer
pub mod kernel_messaging;
/// ACPI manager
pub mod acpi;
/// Architecture constants
pub mod consts;
/// Architecture context
pub mod context;
/// Device management
pub mod device;
/// Memory management
pub mod memory;
/// Interrupt instructions
pub mod interrupts;
/// Initialization and start function
pub mod start;
/// Timer functions
pub mod time;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn
|
() {}
#[cfg(not(test))]
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) ->! {
println!("\n\nPANIC in {} at line {}:", file, line);
println!(" {}", fmt);
loop {}
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->! {
loop {}
}
/// Enter usermode.
///
/// This function never returns.
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
asm!("
mov ds, ax
mov es, ax
mov fs, bx
mov gs, ax
push rax
push rcx
push rdx
push rsi
push rdi
iretq"
:
: "{rax}"(5 << 3 | 3) // Data segment
"{rbx}"(6 << 3 | 3) // TLS segment
"{rcx}"(sp) // Stack pointer
"{rdx}"(3 << 12 | 1 << 9) // Flags - Set IOPL and interrupt enable flag
"{rsi}"(4 << 3 | 3) // Code segment
"{rdi}"(ip) // Instruction Pointer
:
: "intel", "volatile"
);
unreachable!();
}
|
eh_personality
|
identifier_name
|
lib.rs
|
#![feature(abi_x86_interrupt)]
#![feature(asm)]
#![feature(const_fn, unique)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
#![feature(alloc, collections)]
#![feature(core_intrinsics)]
extern crate bit_field;
#[macro_use]
extern crate bitflags;
extern crate raw_cpuid;
#[macro_use]
extern crate lazy_static;
extern crate multiboot2;
extern crate rlibc;
extern crate spin;
extern crate volatile;
extern crate x86_64;
extern crate hole_list_allocator;
extern crate alloc;
extern crate collections;
#[macro_use]
extern crate once;
// Make constants public
pub use consts::*;
#[macro_use]
/// Console handling
pub mod vga_buffer;
/// Kernel message writer
pub mod kernel_messaging;
/// ACPI manager
pub mod acpi;
/// Architecture constants
pub mod consts;
/// Architecture context
pub mod context;
/// Device management
pub mod device;
/// Memory management
pub mod memory;
/// Interrupt instructions
pub mod interrupts;
/// Initialization and start function
pub mod start;
/// Timer functions
pub mod time;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) ->! {
println!("\n\nPANIC in {} at line {}:", file, line);
println!(" {}", fmt);
loop {}
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->!
|
/// Enter usermode.
///
/// This function never returns.
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
asm!("
mov ds, ax
mov es, ax
mov fs, bx
mov gs, ax
push rax
push rcx
push rdx
push rsi
push rdi
iretq"
:
: "{rax}"(5 << 3 | 3) // Data segment
"{rbx}"(6 << 3 | 3) // TLS segment
"{rcx}"(sp) // Stack pointer
"{rdx}"(3 << 12 | 1 << 9) // Flags - Set IOPL and interrupt enable flag
"{rsi}"(4 << 3 | 3) // Code segment
"{rdi}"(ip) // Instruction Pointer
:
: "intel", "volatile"
);
unreachable!();
}
|
{
loop {}
}
|
identifier_body
|
lib.rs
|
#![feature(abi_x86_interrupt)]
#![feature(asm)]
#![feature(const_fn, unique)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
#![feature(alloc, collections)]
#![feature(core_intrinsics)]
extern crate bit_field;
#[macro_use]
extern crate bitflags;
extern crate raw_cpuid;
#[macro_use]
|
extern crate x86_64;
extern crate hole_list_allocator;
extern crate alloc;
extern crate collections;
#[macro_use]
extern crate once;
// Make constants public
pub use consts::*;
#[macro_use]
/// Console handling
pub mod vga_buffer;
/// Kernel message writer
pub mod kernel_messaging;
/// ACPI manager
pub mod acpi;
/// Architecture constants
pub mod consts;
/// Architecture context
pub mod context;
/// Device management
pub mod device;
/// Memory management
pub mod memory;
/// Interrupt instructions
pub mod interrupts;
/// Initialization and start function
pub mod start;
/// Timer functions
pub mod time;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32) ->! {
println!("\n\nPANIC in {} at line {}:", file, line);
println!(" {}", fmt);
loop {}
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->! {
loop {}
}
/// Enter usermode.
///
/// This function never returns.
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
asm!("
mov ds, ax
mov es, ax
mov fs, bx
mov gs, ax
push rax
push rcx
push rdx
push rsi
push rdi
iretq"
:
: "{rax}"(5 << 3 | 3) // Data segment
"{rbx}"(6 << 3 | 3) // TLS segment
"{rcx}"(sp) // Stack pointer
"{rdx}"(3 << 12 | 1 << 9) // Flags - Set IOPL and interrupt enable flag
"{rsi}"(4 << 3 | 3) // Code segment
"{rdi}"(ip) // Instruction Pointer
:
: "intel", "volatile"
);
unreachable!();
}
|
extern crate lazy_static;
extern crate multiboot2;
extern crate rlibc;
extern crate spin;
extern crate volatile;
|
random_line_split
|
block-scoped-shadow.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// edition:2018
enum Foo {}
struct std;
fn main() {
enum
|
{ A, B }
use Foo::*;
//~^ ERROR `Foo` is ambiguous
let _ = (A, B);
fn std() {}
enum std {}
use std as foo;
//~^ ERROR `std` is ambiguous
//~| ERROR `std` is ambiguous
}
|
Foo
|
identifier_name
|
_match.rs
|
}
fcx.write_ty(pat.id, pat_ty);
// somewhat surprising: in this case, the subtyping
// relation goes the opposite way as the other
// cases. Actually what we really want is not a subtyping
// relation at all but rather that there exists a LUB (so
// that they can be compared). However, in practice,
// constants are always scalars or strings. For scalars
// subtyping is irrelevant, and for strings `expr_ty`'s
// type is `&'static str`, so if we say that
//
// &'static str <: expected
//
// that's equivalent to there existing a LUB.
demand::suptype(fcx, pat.span, expected, pat_ty);
}
ast::PatRange(ref begin, ref end) => {
check_expr(fcx, begin);
check_expr(fcx, end);
let lhs_ty = fcx.expr_ty(begin);
let rhs_ty = fcx.expr_ty(end);
// Check that both end-points are of numeric or char type.
let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char();
let lhs_compat = numeric_or_char(lhs_ty);
let rhs_compat = numeric_or_char(rhs_ty);
if !lhs_compat || !rhs_compat {
let span = if !lhs_compat && !rhs_compat {
pat.span
} else if !lhs_compat {
begin.span
} else {
end.span
};
// Note: spacing here is intentional, we want a space before "start" and "end".
span_err!(tcx.sess, span, E0029,
"only char and numeric types are allowed in range patterns\n \
start type: {}\n end type: {}",
fcx.infcx().ty_to_string(lhs_ty),
fcx.infcx().ty_to_string(rhs_ty)
);
return;
}
// Check that the types of the end-points can be unified.
let types_unify = require_same_types(
tcx, Some(fcx.infcx()), false, pat.span, rhs_ty, lhs_ty,
|| "mismatched types in range".to_string()
);
// It's ok to return without a message as `require_same_types` prints an error.
if !types_unify {
return;
}
// Now that we know the types can be unified we find the unified type and use
// it to type the entire expression.
let common_type = fcx.infcx().resolve_type_vars_if_possible(&lhs_ty);
fcx.write_ty(pat.id, common_type);
// subtyping doesn't matter here, as the value is some kind of scalar
demand::eqtype(fcx, pat.span, expected, lhs_ty);
}
ast::PatEnum(..) | ast::PatIdent(..) if pat_is_resolved_const(&tcx.def_map, pat) => {
let const_did = tcx.def_map.borrow().get(&pat.id).unwrap().def_id();
let const_scheme = tcx.lookup_item_type(const_did);
assert!(const_scheme.generics.is_empty());
let const_ty = pcx.fcx.instantiate_type_scheme(pat.span,
&Substs::empty(),
&const_scheme.ty);
fcx.write_ty(pat.id, const_ty);
// FIXME(#20489) -- we should limit the types here to scalars or something!
// As with PatLit, what we really want here is that there
// exist a LUB, but for the cases that can occur, subtype
// is good enough.
demand::suptype(fcx, pat.span, expected, const_ty);
}
ast::PatIdent(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map, pat) => {
let typ = fcx.local_ty(pat.span, pat.id);
match bm {
ast::BindByRef(mutbl) => {
// if the binding is like
// ref x | ref const x | ref mut x
// then `x` is assigned a value of type `&M T` where M is the mutability
// and T is the expected type.
let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
// `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
// required. However, we use equality, which is stronger. See (*) for
// an explanation.
demand::eqtype(fcx, pat.span, region_ty, typ);
}
// otherwise the type of x is the expected type T
ast::BindByValue(_) => {
// As above, `T <: typeof(x)` is required but we
// use equality, see (*) below.
demand::eqtype(fcx, pat.span, expected, typ);
}
}
fcx.write_ty(pat.id, typ);
// if there are multiple arms, make sure they all agree on
// what the type of the binding `x` ought to be
let canon_id = *pcx.map.get(&path.node).unwrap();
if canon_id != pat.id {
let ct = fcx.local_ty(pat.span, canon_id);
demand::eqtype(fcx, pat.span, ct, typ);
}
if let Some(ref p) = *sub {
check_pat(pcx, &**p, expected);
}
}
ast::PatIdent(_, ref path, _) => {
let path = ast_util::ident_to_path(path.span, path.node);
check_pat_enum(pcx, pat, &path, Some(&[]), expected);
}
ast::PatEnum(ref path, ref subpats) => {
let subpats = subpats.as_ref().map(|v| &v[..]);
check_pat_enum(pcx, pat, path, subpats, expected);
}
ast::PatQPath(ref qself, ref path) => {
let self_ty = fcx.to_ty(&qself.ty);
let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) {
d
} else if qself.position == 0 {
def::PathResolution {
// This is just a sentinel for finish_resolving_def_to_ty.
base_def: def::DefMod(ast_util::local_def(ast::CRATE_NODE_ID)),
last_private: LastMod(AllPublic),
depth: path.segments.len()
}
} else {
tcx.sess.span_bug(pat.span,
&format!("unbound path {:?}", pat))
};
if let Some((opt_ty, segments, def)) =
resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty),
path, pat.span, pat.id) {
if check_assoc_item_is_const(pcx, def, pat.span) {
let scheme = tcx.lookup_item_type(def.def_id());
let predicates = tcx.lookup_predicates(def.def_id());
instantiate_path(fcx, segments,
scheme, &predicates,
opt_ty, def, pat.span, pat.id);
let const_ty = fcx.node_ty(pat.id);
demand::suptype(fcx, pat.span, expected, const_ty);
} else {
fcx.write_error(pat.id)
}
}
}
ast::PatStruct(ref path, ref fields, etc) => {
check_pat_struct(pcx, pat, path, fields, etc, expected);
}
ast::PatTup(ref elements) => {
let element_tys: Vec<_> =
(0..elements.len()).map(|_| fcx.infcx().next_ty_var())
.collect();
let pat_ty = tcx.mk_tup(element_tys.clone());
fcx.write_ty(pat.id, pat_ty);
demand::eqtype(fcx, pat.span, expected, pat_ty);
for (element_pat, element_ty) in elements.iter().zip(element_tys) {
check_pat(pcx, &**element_pat, element_ty);
}
}
ast::PatBox(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
let uniq_ty = tcx.mk_box(inner_ty);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// Here, `demand::subtype` is good enough, but I don't
// think any errors can be introduced by using
// `demand::eqtype`.
demand::eqtype(fcx, pat.span, expected, uniq_ty);
fcx.write_ty(pat.id, uniq_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatRegion(ref inner, mutbl) => {
let inner_ty = fcx.infcx().next_ty_var();
let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, rptr_ty);
fcx.write_ty(pat.id, rptr_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatVec(ref before, ref slice, ref after) => {
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
let inner_ty = fcx.infcx().next_ty_var();
let pat_ty = match expected_ty.sty {
ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
let min_len = before.len() + after.len();
match *slice {
Some(_) => cmp::max(min_len, size),
None => min_len
}
}),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: expected_ty.builtin_deref(true).map(|mt| mt.mutbl)
.unwrap_or(ast::MutImmutable)
})
}
};
fcx.write_ty(pat.id, pat_ty);
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, pat_ty);
for elt in before {
check_pat(pcx, &**elt, inner_ty);
}
if let Some(ref slice) = *slice {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mutbl = expected_ty.builtin_deref(true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: mutbl
});
check_pat(pcx, &**slice, slice_ty);
}
for elt in after {
check_pat(pcx, &**elt, inner_ty);
}
}
ast::PatMac(_) => tcx.sess.bug("unexpanded macro")
}
// (*) In most of the cases above (literals and constants being
// the exception), we relate types using strict equality, even
// though subtyping would be sufficient. There are a few reasons
// for this, some of which are fairly subtle and which cost me
// (nmatsakis) an hour or two debugging to remember, so I thought
// I'd write them down this time.
//
// 1. There is no loss of expressiveness here, though it does
// cause some inconvenience. What we are saying is that the type
// of `x` becomes *exactly* what is expected. This can cause unnecessary
// errors in a case like this:
//
// ```
// fn foo<'x>(x: &'x int) {
// let a = 1;
// let mut z = x;
// z = &a;
// }
// ```
//
// The reason we might get an error is that `z` might be
// assigned a type like `&'x int`, and then we would have
// a problem when we try to assign `&a` to `z`, because
// the lifetime of `&a` (i.e., the enclosing block) is
// shorter than `'x`.
//
// HOWEVER, this code works fine. The reason is that the
// expected type here is whatever type the user wrote, not
// the initializer's type. In this case the user wrote
// nothing, so we are going to create a type variable `Z`.
// Then we will assign the type of the initializer (`&'x
// int`) as a subtype of `Z`: `&'x int <: Z`. And hence we
// will instantiate `Z` as a type `&'0 int` where `'0` is
// a fresh region variable, with the constraint that `'x :
// '0`. So basically we're all set.
//
// Note that there are two tests to check that this remains true
// (`regions-reassign-{match,let}-bound-pointer.rs`).
//
// 2. Things go horribly wrong if we use subtype. The reason for
// THIS is a fairly subtle case involving bound regions. See the
// `givens` field in `region_inference`, as well as the test
// `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
// for details. Short version is that we must sometimes detect
// relationships between specific region variables and regions
// bound in a closure signature, and that detection gets thrown
// off when we substitute fresh region variables here to enable
// subtyping.
}
fn check_assoc_item_is_const(pcx: &pat_ctxt, def: def::Def, span: Span) -> bool {
match def {
def::DefAssociatedConst(..) => true,
def::DefMethod(..) => {
span_err!(pcx.fcx.ccx.tcx.sess, span, E0327,
"associated items in match patterns must be constants");
false
}
_ => {
pcx.fcx.ccx.tcx.sess.span_bug(span, "non-associated item in
check_assoc_item_is_const");
}
}
}
pub fn check_dereferencable<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
span: Span, expected: Ty<'tcx>,
inner: &ast::Pat) -> bool {
let fcx = pcx.fcx;
let tcx = pcx.fcx.ccx.tcx;
if pat_is_binding(&tcx.def_map, inner) {
let expected = fcx.infcx().shallow_resolve(expected);
expected.builtin_deref(true).map_or(true, |mt| match mt.ty.sty {
ty::TyTrait(_) => {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
span_err!(tcx.sess, span, E0033,
"type `{}` cannot be dereferenced",
|
{
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
if let ty::TyRef(_, mt) = expected_ty.sty {
if let ty::TySlice(_) = mt.ty.sty {
pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
tcx.mk_slice(tcx.types.u8))
}
}
}
|
conditional_block
|
|
_match.rs
|
scalar
demand::eqtype(fcx, pat.span, expected, lhs_ty);
}
ast::PatEnum(..) | ast::PatIdent(..) if pat_is_resolved_const(&tcx.def_map, pat) => {
let const_did = tcx.def_map.borrow().get(&pat.id).unwrap().def_id();
let const_scheme = tcx.lookup_item_type(const_did);
assert!(const_scheme.generics.is_empty());
let const_ty = pcx.fcx.instantiate_type_scheme(pat.span,
&Substs::empty(),
&const_scheme.ty);
fcx.write_ty(pat.id, const_ty);
// FIXME(#20489) -- we should limit the types here to scalars or something!
// As with PatLit, what we really want here is that there
// exist a LUB, but for the cases that can occur, subtype
// is good enough.
demand::suptype(fcx, pat.span, expected, const_ty);
}
ast::PatIdent(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map, pat) => {
let typ = fcx.local_ty(pat.span, pat.id);
match bm {
ast::BindByRef(mutbl) => {
// if the binding is like
// ref x | ref const x | ref mut x
// then `x` is assigned a value of type `&M T` where M is the mutability
// and T is the expected type.
let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl };
let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt);
// `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is
// required. However, we use equality, which is stronger. See (*) for
// an explanation.
demand::eqtype(fcx, pat.span, region_ty, typ);
}
// otherwise the type of x is the expected type T
ast::BindByValue(_) => {
// As above, `T <: typeof(x)` is required but we
// use equality, see (*) below.
demand::eqtype(fcx, pat.span, expected, typ);
}
}
fcx.write_ty(pat.id, typ);
// if there are multiple arms, make sure they all agree on
// what the type of the binding `x` ought to be
let canon_id = *pcx.map.get(&path.node).unwrap();
if canon_id != pat.id {
let ct = fcx.local_ty(pat.span, canon_id);
demand::eqtype(fcx, pat.span, ct, typ);
}
if let Some(ref p) = *sub {
check_pat(pcx, &**p, expected);
}
}
ast::PatIdent(_, ref path, _) => {
let path = ast_util::ident_to_path(path.span, path.node);
check_pat_enum(pcx, pat, &path, Some(&[]), expected);
}
ast::PatEnum(ref path, ref subpats) => {
let subpats = subpats.as_ref().map(|v| &v[..]);
check_pat_enum(pcx, pat, path, subpats, expected);
}
ast::PatQPath(ref qself, ref path) => {
let self_ty = fcx.to_ty(&qself.ty);
let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) {
d
} else if qself.position == 0 {
def::PathResolution {
// This is just a sentinel for finish_resolving_def_to_ty.
base_def: def::DefMod(ast_util::local_def(ast::CRATE_NODE_ID)),
last_private: LastMod(AllPublic),
depth: path.segments.len()
}
} else {
tcx.sess.span_bug(pat.span,
&format!("unbound path {:?}", pat))
};
if let Some((opt_ty, segments, def)) =
resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty),
path, pat.span, pat.id) {
if check_assoc_item_is_const(pcx, def, pat.span) {
let scheme = tcx.lookup_item_type(def.def_id());
let predicates = tcx.lookup_predicates(def.def_id());
instantiate_path(fcx, segments,
scheme, &predicates,
opt_ty, def, pat.span, pat.id);
let const_ty = fcx.node_ty(pat.id);
demand::suptype(fcx, pat.span, expected, const_ty);
} else {
fcx.write_error(pat.id)
}
}
}
ast::PatStruct(ref path, ref fields, etc) => {
check_pat_struct(pcx, pat, path, fields, etc, expected);
}
ast::PatTup(ref elements) => {
let element_tys: Vec<_> =
(0..elements.len()).map(|_| fcx.infcx().next_ty_var())
.collect();
let pat_ty = tcx.mk_tup(element_tys.clone());
fcx.write_ty(pat.id, pat_ty);
demand::eqtype(fcx, pat.span, expected, pat_ty);
for (element_pat, element_ty) in elements.iter().zip(element_tys) {
check_pat(pcx, &**element_pat, element_ty);
}
}
ast::PatBox(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
let uniq_ty = tcx.mk_box(inner_ty);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// Here, `demand::subtype` is good enough, but I don't
// think any errors can be introduced by using
// `demand::eqtype`.
demand::eqtype(fcx, pat.span, expected, uniq_ty);
fcx.write_ty(pat.id, uniq_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatRegion(ref inner, mutbl) => {
let inner_ty = fcx.infcx().next_ty_var();
let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, rptr_ty);
fcx.write_ty(pat.id, rptr_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatVec(ref before, ref slice, ref after) => {
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
let inner_ty = fcx.infcx().next_ty_var();
let pat_ty = match expected_ty.sty {
ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
let min_len = before.len() + after.len();
match *slice {
Some(_) => cmp::max(min_len, size),
None => min_len
}
}),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: expected_ty.builtin_deref(true).map(|mt| mt.mutbl)
.unwrap_or(ast::MutImmutable)
})
}
};
fcx.write_ty(pat.id, pat_ty);
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, pat_ty);
for elt in before {
check_pat(pcx, &**elt, inner_ty);
}
if let Some(ref slice) = *slice {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mutbl = expected_ty.builtin_deref(true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: mutbl
});
check_pat(pcx, &**slice, slice_ty);
}
for elt in after {
check_pat(pcx, &**elt, inner_ty);
}
}
ast::PatMac(_) => tcx.sess.bug("unexpanded macro")
}
// (*) In most of the cases above (literals and constants being
// the exception), we relate types using strict equality, even
// though subtyping would be sufficient. There are a few reasons
// for this, some of which are fairly subtle and which cost me
// (nmatsakis) an hour or two debugging to remember, so I thought
// I'd write them down this time.
//
// 1. There is no loss of expressiveness here, though it does
// cause some inconvenience. What we are saying is that the type
// of `x` becomes *exactly* what is expected. This can cause unnecessary
// errors in a case like this:
//
// ```
// fn foo<'x>(x: &'x int) {
// let a = 1;
// let mut z = x;
// z = &a;
// }
// ```
//
// The reason we might get an error is that `z` might be
// assigned a type like `&'x int`, and then we would have
// a problem when we try to assign `&a` to `z`, because
// the lifetime of `&a` (i.e., the enclosing block) is
// shorter than `'x`.
//
// HOWEVER, this code works fine. The reason is that the
// expected type here is whatever type the user wrote, not
// the initializer's type. In this case the user wrote
// nothing, so we are going to create a type variable `Z`.
// Then we will assign the type of the initializer (`&'x
// int`) as a subtype of `Z`: `&'x int <: Z`. And hence we
// will instantiate `Z` as a type `&'0 int` where `'0` is
// a fresh region variable, with the constraint that `'x :
// '0`. So basically we're all set.
//
// Note that there are two tests to check that this remains true
// (`regions-reassign-{match,let}-bound-pointer.rs`).
//
// 2. Things go horribly wrong if we use subtype. The reason for
// THIS is a fairly subtle case involving bound regions. See the
// `givens` field in `region_inference`, as well as the test
// `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
// for details. Short version is that we must sometimes detect
// relationships between specific region variables and regions
// bound in a closure signature, and that detection gets thrown
// off when we substitute fresh region variables here to enable
// subtyping.
}
fn check_assoc_item_is_const(pcx: &pat_ctxt, def: def::Def, span: Span) -> bool {
match def {
def::DefAssociatedConst(..) => true,
def::DefMethod(..) => {
span_err!(pcx.fcx.ccx.tcx.sess, span, E0327,
"associated items in match patterns must be constants");
false
}
_ => {
pcx.fcx.ccx.tcx.sess.span_bug(span, "non-associated item in
check_assoc_item_is_const");
}
}
}
pub fn check_dereferencable<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
span: Span, expected: Ty<'tcx>,
inner: &ast::Pat) -> bool {
let fcx = pcx.fcx;
let tcx = pcx.fcx.ccx.tcx;
if pat_is_binding(&tcx.def_map, inner) {
let expected = fcx.infcx().shallow_resolve(expected);
expected.builtin_deref(true).map_or(true, |mt| match mt.ty.sty {
ty::TyTrait(_) => {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
span_err!(tcx.sess, span, E0033,
"type `{}` cannot be dereferenced",
fcx.infcx().ty_to_string(expected));
false
}
_ => true
})
} else {
true
}
}
pub fn check_match<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &'tcx ast::Expr,
discrim: &'tcx ast::Expr,
arms: &'tcx [ast::Arm],
expected: Expectation<'tcx>,
match_src: ast::MatchSource) {
let tcx = fcx.ccx.tcx;
// Not entirely obvious: if matches may create ref bindings, we
// want to use the *precise* type of the discriminant, *not* some
// supertype, as the "discriminant type" (issue #23116).
let contains_ref_bindings = arms.iter()
.filter_map(|a| tcx.arm_contains_ref_binding(a))
.max_by(|m| match *m {
ast::MutMutable => 1,
ast::MutImmutable => 0,
});
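    // Illustrative note (added, not from the original source): for a match
    // like `match v { ref mut r => r.push(1) }` the binding borrows the
    // discriminant itself, so `v` must keep its exact type; arms without
    // ref bindings only need *some* supertype of the discriminant.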
let discrim_ty;
|
check_expr_with_lvalue_pref(fcx, discrim, LvaluePreference::from_mutbl(m));
discrim_ty = fcx.expr_ty(discrim);
} else {
//...but otherwise we want to use any supertype of the
// discriminant. This is sort of a workaround, see note (*) in
// `check_pat` for some details.
discrim_ty = fcx.infcx().next_ty_var();
check_expr_has_type(fcx, discrim, discrim_ty);
};
// Typecheck the patterns first, so that we get types for all the
// bindings.
for arm in arms {
let mut pcx = pat_ctxt {
fcx: fcx,
map: pat_id_map(&tcx.def_map, &*arm.pats[0]),
};
for p in &arm.pats {
check_pat(&mut pcx, &**p, discrim_ty);
}
}
// Now typecheck the blocks.
//
// The result of the match is the common supertype of all the
// arms. Start out the value as bottom, since it's the, well,
    // bottom of the type lattice, and we'll be moving up the lattice as
    // we process each arm. (Note that any match with 0 arms is matching
    // on an empty type and is therefore unreachable; should the flow
// of execution reach it, we will panic, so bottom is an appropriate
// type in that case)
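    // Hypothetical illustration (added): in `match n { 0 => 1u8, _ => panic!() }`
    // the second arm diverges (bottom), so the whole match still unifies to `u8`.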
let expected = expected.adjust_for_branches(fcx);
let result_ty = arms.iter().fold(fcx.infcx().next_diverging_ty_var(), |result_ty, arm| {
let bty = match expected {
// We don't coerce to `()` so that if the match expression is a
            // statement its branches can have any consistent type. That allows
// us to give better error messages (pointing to a usually better
// arm for inconsistent arms or to the whole match when a `()` type
// is required).
            Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
check_expr_coercable_to_type(fcx, &*arm.body, ety);
ety
}
_ => {
check_expr_with_expectation(fcx, &*arm.body, expected);
fcx.node_ty(arm.body.id)
}
};
if let Some(ref e) = arm.guard {
check_expr_has_type(fcx, &**e, tcx.types.bool);
}
if result_ty.references_error() || bty.references_error() {
tcx.types.err
} else {
let (origin, expected, found) =
|
if let Some(m) = contains_ref_bindings {
|
random_line_split
|
_match.rs
|
base_def: def::DefMod(ast_util::local_def(ast::CRATE_NODE_ID)),
last_private: LastMod(AllPublic),
depth: path.segments.len()
}
} else {
tcx.sess.span_bug(pat.span,
&format!("unbound path {:?}", pat))
};
if let Some((opt_ty, segments, def)) =
resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty),
path, pat.span, pat.id) {
if check_assoc_item_is_const(pcx, def, pat.span) {
let scheme = tcx.lookup_item_type(def.def_id());
let predicates = tcx.lookup_predicates(def.def_id());
instantiate_path(fcx, segments,
scheme, &predicates,
opt_ty, def, pat.span, pat.id);
let const_ty = fcx.node_ty(pat.id);
demand::suptype(fcx, pat.span, expected, const_ty);
} else {
fcx.write_error(pat.id)
}
}
}
ast::PatStruct(ref path, ref fields, etc) => {
check_pat_struct(pcx, pat, path, fields, etc, expected);
}
ast::PatTup(ref elements) => {
let element_tys: Vec<_> =
(0..elements.len()).map(|_| fcx.infcx().next_ty_var())
.collect();
let pat_ty = tcx.mk_tup(element_tys.clone());
fcx.write_ty(pat.id, pat_ty);
demand::eqtype(fcx, pat.span, expected, pat_ty);
for (element_pat, element_ty) in elements.iter().zip(element_tys) {
check_pat(pcx, &**element_pat, element_ty);
}
}
ast::PatBox(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
let uniq_ty = tcx.mk_box(inner_ty);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// Here, `demand::subtype` is good enough, but I don't
// think any errors can be introduced by using
// `demand::eqtype`.
demand::eqtype(fcx, pat.span, expected, uniq_ty);
fcx.write_ty(pat.id, uniq_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatRegion(ref inner, mutbl) => {
let inner_ty = fcx.infcx().next_ty_var();
let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, rptr_ty);
fcx.write_ty(pat.id, rptr_ty);
check_pat(pcx, &**inner, inner_ty);
} else {
fcx.write_error(pat.id);
check_pat(pcx, &**inner, tcx.types.err);
}
}
ast::PatVec(ref before, ref slice, ref after) => {
let expected_ty = structurally_resolved_type(fcx, pat.span, expected);
let inner_ty = fcx.infcx().next_ty_var();
let pat_ty = match expected_ty.sty {
ty::TyArray(_, size) => tcx.mk_array(inner_ty, {
let min_len = before.len() + after.len();
match *slice {
Some(_) => cmp::max(min_len, size),
None => min_len
}
}),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: expected_ty.builtin_deref(true).map(|mt| mt.mutbl)
.unwrap_or(ast::MutImmutable)
})
}
};
fcx.write_ty(pat.id, pat_ty);
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
demand::eqtype(fcx, pat.span, expected, pat_ty);
for elt in before {
check_pat(pcx, &**elt, inner_ty);
}
if let Some(ref slice) = *slice {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mutbl = expected_ty.builtin_deref(true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut {
ty: tcx.mk_slice(inner_ty),
mutbl: mutbl
});
check_pat(pcx, &**slice, slice_ty);
}
for elt in after {
check_pat(pcx, &**elt, inner_ty);
}
}
ast::PatMac(_) => tcx.sess.bug("unexpanded macro")
}
// (*) In most of the cases above (literals and constants being
    // the exception), we relate types using strict equality, even
// though subtyping would be sufficient. There are a few reasons
// for this, some of which are fairly subtle and which cost me
// (nmatsakis) an hour or two debugging to remember, so I thought
// I'd write them down this time.
//
// 1. There is no loss of expressiveness here, though it does
// cause some inconvenience. What we are saying is that the type
    // of `x` becomes *exactly* what is expected. This can cause unnecessary
    // errors in some cases, such as this one:
//
// ```
// fn foo<'x>(x: &'x int) {
// let a = 1;
// let mut z = x;
// z = &a;
// }
// ```
//
// The reason we might get an error is that `z` might be
// assigned a type like `&'x int`, and then we would have
// a problem when we try to assign `&a` to `z`, because
// the lifetime of `&a` (i.e., the enclosing block) is
// shorter than `'x`.
//
// HOWEVER, this code works fine. The reason is that the
// expected type here is whatever type the user wrote, not
// the initializer's type. In this case the user wrote
// nothing, so we are going to create a type variable `Z`.
// Then we will assign the type of the initializer (`&'x
// int`) as a subtype of `Z`: `&'x int <: Z`. And hence we
// will instantiate `Z` as a type `&'0 int` where `'0` is
// a fresh region variable, with the constraint that `'x :
// '0`. So basically we're all set.
//
// Note that there are two tests to check that this remains true
// (`regions-reassign-{match,let}-bound-pointer.rs`).
//
// 2. Things go horribly wrong if we use subtype. The reason for
// THIS is a fairly subtle case involving bound regions. See the
// `givens` field in `region_inference`, as well as the test
// `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
// for details. Short version is that we must sometimes detect
// relationships between specific region variables and regions
// bound in a closure signature, and that detection gets thrown
// off when we substitute fresh region variables here to enable
// subtyping.
}
fn check_assoc_item_is_const(pcx: &pat_ctxt, def: def::Def, span: Span) -> bool {
match def {
def::DefAssociatedConst(..) => true,
def::DefMethod(..) => {
span_err!(pcx.fcx.ccx.tcx.sess, span, E0327,
"associated items in match patterns must be constants");
false
}
_ => {
pcx.fcx.ccx.tcx.sess.span_bug(span, "non-associated item in
check_assoc_item_is_const");
}
}
}
pub fn check_dereferencable<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>,
span: Span, expected: Ty<'tcx>,
inner: &ast::Pat) -> bool {
let fcx = pcx.fcx;
let tcx = pcx.fcx.ccx.tcx;
if pat_is_binding(&tcx.def_map, inner) {
let expected = fcx.infcx().shallow_resolve(expected);
expected.builtin_deref(true).map_or(true, |mt| match mt.ty.sty {
ty::TyTrait(_) => {
// This is "x = SomeTrait" being reduced from
// "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
span_err!(tcx.sess, span, E0033,
"type `{}` cannot be dereferenced",
fcx.infcx().ty_to_string(expected));
false
}
_ => true
})
} else {
true
}
}
pub fn check_match<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &'tcx ast::Expr,
discrim: &'tcx ast::Expr,
arms: &'tcx [ast::Arm],
expected: Expectation<'tcx>,
match_src: ast::MatchSource) {
let tcx = fcx.ccx.tcx;
// Not entirely obvious: if matches may create ref bindings, we
// want to use the *precise* type of the discriminant, *not* some
// supertype, as the "discriminant type" (issue #23116).
let contains_ref_bindings = arms.iter()
.filter_map(|a| tcx.arm_contains_ref_binding(a))
.max_by(|m| match *m {
ast::MutMutable => 1,
ast::MutImmutable => 0,
});
let discrim_ty;
if let Some(m) = contains_ref_bindings {
check_expr_with_lvalue_pref(fcx, discrim, LvaluePreference::from_mutbl(m));
discrim_ty = fcx.expr_ty(discrim);
} else {
//...but otherwise we want to use any supertype of the
// discriminant. This is sort of a workaround, see note (*) in
// `check_pat` for some details.
discrim_ty = fcx.infcx().next_ty_var();
check_expr_has_type(fcx, discrim, discrim_ty);
};
// Typecheck the patterns first, so that we get types for all the
// bindings.
for arm in arms {
let mut pcx = pat_ctxt {
fcx: fcx,
map: pat_id_map(&tcx.def_map, &*arm.pats[0]),
};
for p in &arm.pats {
check_pat(&mut pcx, &**p, discrim_ty);
}
}
// Now typecheck the blocks.
//
// The result of the match is the common supertype of all the
// arms. Start out the value as bottom, since it's the, well,
    // bottom of the type lattice, and we'll be moving up the lattice as
    // we process each arm. (Note that any match with 0 arms is matching
    // on an empty type and is therefore unreachable; should the flow
// of execution reach it, we will panic, so bottom is an appropriate
// type in that case)
let expected = expected.adjust_for_branches(fcx);
let result_ty = arms.iter().fold(fcx.infcx().next_diverging_ty_var(), |result_ty, arm| {
let bty = match expected {
// We don't coerce to `()` so that if the match expression is a
            // statement its branches can have any consistent type. That allows
// us to give better error messages (pointing to a usually better
// arm for inconsistent arms or to the whole match when a `()` type
// is required).
            Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
check_expr_coercable_to_type(fcx, &*arm.body, ety);
ety
}
_ => {
check_expr_with_expectation(fcx, &*arm.body, expected);
fcx.node_ty(arm.body.id)
}
};
if let Some(ref e) = arm.guard {
check_expr_has_type(fcx, &**e, tcx.types.bool);
}
if result_ty.references_error() || bty.references_error() {
tcx.types.err
} else {
let (origin, expected, found) = match match_src {
/* if-let construct without an else block */
ast::MatchSource::IfLetDesugar { contains_else_clause }
                if !contains_else_clause => (
infer::IfExpressionWithNoElse(expr.span),
bty,
result_ty,
),
_ => (
infer::MatchExpressionArm(expr.span, arm.body.span),
result_ty,
bty,
),
};
infer::common_supertype(
fcx.infcx(),
origin,
true,
expected,
found,
)
}
});
fcx.write_ty(expr.id, result_ty);
}
pub struct pat_ctxt<'a, 'tcx: 'a> {
pub fcx: &'a FnCtxt<'a, 'tcx>,
pub map: PatIdMap,
}
pub fn check_pat_struct<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, pat: &'tcx ast::Pat,
path: &ast::Path, fields: &'tcx [Spanned<ast::FieldPat>],
etc: bool, expected: Ty<'tcx>) {
let fcx = pcx.fcx;
let tcx = pcx.fcx.ccx.tcx;
let def = tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
let (adt_def, variant) = match def {
def::DefTrait(_) => {
let name = pprust::path_to_string(path);
span_err!(tcx.sess, pat.span, E0168,
"use of trait `{}` in a struct pattern", name);
fcx.write_error(pat.id);
for field in fields {
check_pat(pcx, &*field.node.pat, tcx.types.err);
}
return;
},
_ => {
let def_type = tcx.lookup_item_type(def.def_id());
match def_type.ty.sty {
ty::TyStruct(struct_def, _) =>
(struct_def, struct_def.struct_variant()),
ty::TyEnum(enum_def, _)
if def == def::DefVariant(enum_def.did, def.def_id(), true) =>
(enum_def, enum_def.variant_of_def(def)),
_ => {
let name = pprust::path_to_string(path);
span_err!(tcx.sess, pat.span, E0163,
"`{}` does not name a struct or a struct variant", name);
fcx.write_error(pat.id);
for field in fields {
check_pat(pcx, &*field.node.pat, tcx.types.err);
}
return;
}
}
}
};
instantiate_path(pcx.fcx,
&path.segments,
adt_def.type_scheme(tcx),
&adt_def.predicates(tcx),
None,
def,
pat.span,
pat.id);
let pat_ty = fcx.node_ty(pat.id);
demand::eqtype(fcx, pat.span, expected, pat_ty);
let item_substs = fcx
.item_substs()
.get(&pat.id)
.map(|substs| substs.substs.clone())
.unwrap_or_else(|| Substs::empty());
check_struct_pat_fields(pcx, pat.span, fields, variant, &item_substs, etc);
}
pub fn
|
check_pat_enum
|
identifier_name
|
|
upper_half.rs
|
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base::num::conversion::traits::{JoinHalves, SplitInHalf};
use malachite_base_test_util::generators::unsigned_gen;
fn upper_half_test_helper<T: PrimitiveUnsigned + SplitInHalf>(n: T, out: T::Half)
where
T::Half: PrimitiveUnsigned,
|
#[test]
pub fn test_upper_half() {
upper_half_test_helper(0u64, 0u32);
upper_half_test_helper(1u64, 0u32);
upper_half_test_helper(u16::from(u8::MAX), 0);
upper_half_test_helper(u16::from(u8::MAX) + 1, 1);
upper_half_test_helper(u16::MAX, u8::MAX);
upper_half_test_helper(258u16, 1u8);
upper_half_test_helper(0xabcd1234u32, 0xabcd);
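    // Worked example (sketch): 0xabcd_1234u32 splits into upper half 0xabcd
    // and lower half 0x1234, and join_halves(0xabcd, 0x1234) reassembles
    // 0xabcd_1234 (see the round-trip property test below).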
}
fn upper_half_properties_helper<T: JoinHalves + PrimitiveUnsigned + SplitInHalf>() {
unsigned_gen::<T>().test_properties(|n| {
let upper = n.upper_half();
assert_eq!(T::join_halves(upper, n.lower_half()), n);
});
}
#[test]
fn upper_half_properties() {
upper_half_properties_helper::<u16>();
upper_half_properties_helper::<u32>();
upper_half_properties_helper::<u64>();
upper_half_properties_helper::<u128>();
}
|
{
assert_eq!(n.upper_half(), out);
}
|
identifier_body
|
upper_half.rs
|
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base::num::conversion::traits::{JoinHalves, SplitInHalf};
use malachite_base_test_util::generators::unsigned_gen;
fn upper_half_test_helper<T: PrimitiveUnsigned + SplitInHalf>(n: T, out: T::Half)
where
T::Half: PrimitiveUnsigned,
{
assert_eq!(n.upper_half(), out);
|
pub fn test_upper_half() {
upper_half_test_helper(0u64, 0u32);
upper_half_test_helper(1u64, 0u32);
upper_half_test_helper(u16::from(u8::MAX), 0);
upper_half_test_helper(u16::from(u8::MAX) + 1, 1);
upper_half_test_helper(u16::MAX, u8::MAX);
upper_half_test_helper(258u16, 1u8);
upper_half_test_helper(0xabcd1234u32, 0xabcd);
}
fn upper_half_properties_helper<T: JoinHalves + PrimitiveUnsigned + SplitInHalf>() {
unsigned_gen::<T>().test_properties(|n| {
let upper = n.upper_half();
assert_eq!(T::join_halves(upper, n.lower_half()), n);
});
}
#[test]
fn upper_half_properties() {
upper_half_properties_helper::<u16>();
upper_half_properties_helper::<u32>();
upper_half_properties_helper::<u64>();
upper_half_properties_helper::<u128>();
}
|
}
#[test]
|
random_line_split
|
upper_half.rs
|
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base::num::conversion::traits::{JoinHalves, SplitInHalf};
use malachite_base_test_util::generators::unsigned_gen;
fn upper_half_test_helper<T: PrimitiveUnsigned + SplitInHalf>(n: T, out: T::Half)
where
T::Half: PrimitiveUnsigned,
{
assert_eq!(n.upper_half(), out);
}
#[test]
pub fn
|
() {
upper_half_test_helper(0u64, 0u32);
upper_half_test_helper(1u64, 0u32);
upper_half_test_helper(u16::from(u8::MAX), 0);
upper_half_test_helper(u16::from(u8::MAX) + 1, 1);
upper_half_test_helper(u16::MAX, u8::MAX);
upper_half_test_helper(258u16, 1u8);
upper_half_test_helper(0xabcd1234u32, 0xabcd);
}
fn upper_half_properties_helper<T: JoinHalves + PrimitiveUnsigned + SplitInHalf>() {
unsigned_gen::<T>().test_properties(|n| {
let upper = n.upper_half();
assert_eq!(T::join_halves(upper, n.lower_half()), n);
});
}
#[test]
fn upper_half_properties() {
upper_half_properties_helper::<u16>();
upper_half_properties_helper::<u32>();
upper_half_properties_helper::<u64>();
upper_half_properties_helper::<u128>();
}
|
test_upper_half
|
identifier_name
|
main.rs
|
/* Periodically crawl web pages and alert the user of changes
*
* Copyright (C) 2016 Owen Stenson
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* More information in the enclosed `LICENSE' file
*/
/* main.rs is a mess
* mostly used to test stuff before it goes in lib.rs as a unit test.
* pardon the sloppiness
* NOTE: future default location: /var/cache
*/
mod parse;
mod event;
mod job;
mod action;
use std::path::Path;
use std::fs::DirBuilder;
use std::{thread,time};
extern crate chrono;
use chrono::{DateTime,Local,Duration,Timelike};
const LOG_FILE: &'static str = "/home/owen/page-mon/log";
//const CFG_FILE: &'static str = "/home/owen/page-mon/config_";
const CFG_FILE: &'static str = "/home/owen/shared/code/rust/page-mon/config";
fn main()
|
}
println!("Cache path: {:?}", cache_path);
let mut now = Local::now();
thread::sleep(time_to_next_minute(&now));
loop {
//iterate through the jobs, executing those for which it is time
for j in &jobs {
println!("Starting job {}", j.url);
if let Err(e) = j.fire_if_match(cache_path, &now) {
println!("Error in job {}: `{}`", j.url, e);
}
}
println!("\t{}", now);
now = Local::now();
thread::sleep(time_to_next_minute(&now));
}
}
fn time_to_next_minute(last_run: &DateTime<Local>) -> time::Duration {
    //compute the amount of time the program should wait before checking again
    //if cycling through everything took more than 1 minute, then it'll miss
    // the next minute (TODO?)
    //It's okay to wait a little too long, but never too short
let sec = last_run.second() as i64; // 0 <= sec <= 60
//TODO: will there ever be an instance when unwrap fails?
//DateTime::second() should never exceed 60, right?
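    //e.g. a last_run of 12:30:42 gives 60 - 42 = 18 seconds of sleep,
    //waking just after 12:31:00 (illustrative arithmetic only)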
Duration::seconds(60i64 - sec).to_std().unwrap()
}
|
{
    //TODO: start threads for each task
//TODO: replace vec with map to futures?
let config = Path::new(CFG_FILE);
//parse input; panic if parsing fails
let (cmds, vars) = match parse::parse(config) {
Ok((c,v)) => (c,v),
Err(e) => panic!(e.join("\n")),
};
//panic! if a job is invalid
let jobs: Vec<job::Job> = cmds.into_iter()
.map(|c| job::Job::from(c, &vars).unwrap())
.collect();
//create cache directory if it's not there
let cache_path = parse::get_dir(&vars).expect("No `DIR` variable set in config");
if Path::new(cache_path).is_dir() == false {
DirBuilder::new().recursive(true).create(cache_path).expect("Failed to create cache dir");
|
identifier_body
|
main.rs
|
/* Periodically crawl web pages and alert the user of changes
*
* Copyright (C) 2016 Owen Stenson
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* More information in the enclosed `LICENSE' file
*/
/* main.rs is a mess
* mostly used to test stuff before it goes in lib.rs as a unit test.
* pardon the sloppiness
* NOTE: future default location: /var/cache
*/
mod parse;
mod event;
mod job;
mod action;
use std::path::Path;
use std::fs::DirBuilder;
use std::{thread,time};
extern crate chrono;
use chrono::{DateTime,Local,Duration,Timelike};
const LOG_FILE: &'static str = "/home/owen/page-mon/log";
//const CFG_FILE: &'static str = "/home/owen/page-mon/config_";
const CFG_FILE: &'static str = "/home/owen/shared/code/rust/page-mon/config";
fn main() {
    //TODO: start threads for each task
//TODO: replace vec with map to futures?
let config = Path::new(CFG_FILE);
//parse input; panic if parsing fails
let (cmds, vars) = match parse::parse(config) {
Ok((c,v)) => (c,v),
Err(e) => panic!(e.join("\n")),
};
//panic! if a job is invalid
let jobs: Vec<job::Job> = cmds.into_iter()
.map(|c| job::Job::from(c, &vars).unwrap())
.collect();
//create cache directory if it's not there
let cache_path = parse::get_dir(&vars).expect("No `DIR` variable set in config");
if Path::new(cache_path).is_dir() == false {
DirBuilder::new().recursive(true).create(cache_path).expect("Failed to create cache dir");
}
println!("Cache path: {:?}", cache_path);
let mut now = Local::now();
thread::sleep(time_to_next_minute(&now));
loop {
//iterate through the jobs, executing those for which it is time
for j in &jobs {
println!("Starting job {}", j.url);
if let Err(e) = j.fire_if_match(cache_path, &now) {
println!("Error in job {}: `{}`", j.url, e);
}
}
println!("\t{}", now);
now = Local::now();
thread::sleep(time_to_next_minute(&now));
}
}
fn
|
(last_run: &DateTime<Local>) -> time::Duration {
    //compute the amount of time the program should wait before checking again
    //if cycling through everything took more than 1 minute, then it'll miss
    // the next minute (TODO?)
    //It's okay to wait a little too long, but never too short
let sec = last_run.second() as i64; // 0 <= sec <= 60
//TODO: will there ever be an instance when unwrap fails?
//DateTime::second() should never exceed 60, right?
Duration::seconds(60i64 - sec).to_std().unwrap()
}
|
time_to_next_minute
|
identifier_name
|
main.rs
|
/* Periodically crawl web pages and alert the user of changes
*
* Copyright (C) 2016 Owen Stenson
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* More information in the enclosed `LICENSE' file
*/
/* main.rs is a mess
* mostly used to test stuff before it goes in lib.rs as a unit test.
* pardon the sloppiness
* NOTE: future default location: /var/cache
*/
mod parse;
mod event;
mod job;
mod action;
use std::path::Path;
use std::fs::DirBuilder;
use std::{thread,time};
extern crate chrono;
use chrono::{DateTime,Local,Duration,Timelike};
const LOG_FILE: &'static str = "/home/owen/page-mon/log";
//const CFG_FILE: &'static str = "/home/owen/page-mon/config_";
const CFG_FILE: &'static str = "/home/owen/shared/code/rust/page-mon/config";
fn main() {
    //TODO: start threads for each task
//TODO: replace vec with map to futures?
let config = Path::new(CFG_FILE);
//parse input; panic if parsing fails
let (cmds, vars) = match parse::parse(config) {
Ok((c,v)) => (c,v),
Err(e) => panic!(e.join("\n")),
};
//panic! if a job is invalid
let jobs: Vec<job::Job> = cmds.into_iter()
.map(|c| job::Job::from(c, &vars).unwrap())
.collect();
//create cache directory if it's not there
let cache_path = parse::get_dir(&vars).expect("No `DIR` variable set in config");
if Path::new(cache_path).is_dir() == false {
DirBuilder::new().recursive(true).create(cache_path).expect("Failed to create cache dir");
|
loop {
//iterate through the jobs, executing those for which it is time
for j in &jobs {
println!("Starting job {}", j.url);
if let Err(e) = j.fire_if_match(cache_path, &now) {
println!("Error in job {}: `{}`", j.url, e);
}
}
println!("\t{}", now);
now = Local::now();
thread::sleep(time_to_next_minute(&now));
}
}
fn time_to_next_minute(last_run: &DateTime<Local>) -> time::Duration {
    //compute the amount of time the program should wait before checking again
    //if cycling through everything took more than 1 minute, then it'll miss
    // the next minute (TODO?)
    //It's okay to wait a little too long, but never too short
let sec = last_run.second() as i64; // 0 <= sec <= 60
//TODO: will there ever be an instance when unwrap fails?
//DateTime::second() should never exceed 60, right?
Duration::seconds(60i64 - sec).to_std().unwrap()
}
|
}
println!("Cache path: {:?}", cache_path);
let mut now = Local::now();
thread::sleep(time_to_next_minute(&now));
|
random_line_split
|
range_collection.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// This module defines a trait for a collection of ranged values and an implementation
/// for this trait over a sorted vector.
use std::ops::{Add, Range, Sub};
pub trait ToUsize {
fn to_usize(&self) -> usize;
}
pub trait FromUsize {
fn from_usize(s: usize) -> Self;
}
/// A key-value collection ordered by key, with sequential key-value pairs grouped together.
/// Such a group is called a range.
/// E.g. a collection of 5 pairs {1, a}, {2, b}, {10, x}, {11, y}, {12, z} will be grouped into two ranges: {1, [a,b]}, {10, [x,y,z]}
pub trait RangeCollection<K, V> {
/// Check if the given key is present in the collection.
fn have_item(&self, key: &K) -> bool;
/// Get value by key.
fn find_item(&self, key: &K) -> Option<&V>;
/// Get a range of keys from `key` till the end of the range that has `key`
    /// Returns an empty range if the key does not exist.
fn get_tail(&mut self, key: &K) -> Range<K>;
/// Remove all elements < `start` in the range that contains `start` - 1
fn remove_head(&mut self, start: &K);
/// Remove all elements >= `start` in the range that contains `start`
fn remove_tail(&mut self, start: &K);
/// Remove all elements >= `start`
fn remove_from(&mut self, start: &K);
    /// Insert a single value at `key`, extending or merging adjacent ranges
fn insert_item(&mut self, key: K, value: V);
/// Get an iterator over ranges
fn range_iter(&self) -> RangeIterator<K, V>;
}
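// Minimal usage sketch (illustrative only, relying on the Vec impl below):
//
//     let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
//     ranges.insert_item(1, 'a');
//     ranges.insert_item(2, 'b');   // extends the range starting at 1
//     ranges.insert_item(10, 'x');  // starts a new, separate range
//     assert_eq!(ranges.get_tail(&1), 1..3);
//     assert_eq!(ranges.find_item(&10), Some(&'x'));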
/// Range iterator. For each range, yields the key of the first element of the range and a slice of values.
pub struct
|
<'c, K: 'c, V: 'c> {
range: usize,
collection: &'c Vec<(K, Vec<V>)>,
}
impl<'c, K: 'c, V: 'c> Iterator for RangeIterator<'c, K, V>
where K: Add<Output = K> + FromUsize + ToUsize + Copy,
{
type Item = (K, &'c [V]);
// The 'Iterator' trait only requires the 'next' method to be defined. The
// return type is 'Option<T>', 'None' is returned when the 'Iterator' is
// over, otherwise the next value is returned wrapped in 'Some'
fn next(&mut self) -> Option<(K, &'c [V])> {
if self.range > 0 {
self.range -= 1;
} else {
return None;
}
match self.collection.get(self.range) {
Some(&(ref k, ref vec)) => {
Some((*k, &vec))
}
None => None,
}
}
}
impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)>
where K: Ord + PartialEq + Add<Output = K> + Sub<Output = K> + Copy + FromUsize + ToUsize,
{
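    // Implementation note (added): ranges are kept sorted by starting key in
    // *descending* order, which is why every binary search below reverses
    // the comparison.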
fn range_iter(&self) -> RangeIterator<K, V> {
RangeIterator {
range: self.len(),
collection: self,
}
}
fn have_item(&self, key: &K) -> bool {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => true,
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) => k <= key && (*k + FromUsize::from_usize(v.len())) > *key,
_ => false,
},
}
}
fn find_item(&self, key: &K) -> Option<&V> {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => self.get(index).unwrap().1.get(0),
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => v.get((*key - *k).to_usize()),
_ => None,
},
}
}
fn get_tail(&mut self, key: &K) -> Range<K> {
let kv = *key;
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => kv..(kv + FromUsize::from_usize(self[index].1.len())),
Err(index) => {
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
kv..(*k + FromUsize::from_usize(v.len()))
}
_ => kv..kv,
}
}
}
}
/// Remove element key and following elements in the same range
fn remove_tail(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.remove(index);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
/// Remove the element and all following it.
fn remove_from(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.drain(..index + 1);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.drain(..index + 1);
} else {
self.drain(..index);
}
}
}
}
/// Remove range elements up to key
fn remove_head(&mut self, key: &K) {
if *key == FromUsize::from_usize(0) {
return;
}
let prev = *key - FromUsize::from_usize(1);
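        // (added) `prev` is the last key that must be dropped; if some range
        // contains it, everything from `key` onward is split off and kept,
        // and that range's starting key is advanced to `key`.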
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => {} //start of range, do nothing.
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref mut k, ref mut v)) if *k <= prev && (*k + FromUsize::from_usize(v.len())) > prev => {
let tail = v.split_off((*key - *k).to_usize());
empty = tail.is_empty();
let removed = ::std::mem::replace(v, tail);
let new_k = *k + FromUsize::from_usize(removed.len());
::std::mem::replace(k, new_k);
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
fn insert_item(&mut self, key: K, value: V) {
assert!(!self.have_item(&key));
// todo: fix warning
let lower = match self.binary_search_by(|&(k, _)| k.cmp(&key).reverse()) {
Ok(index) | Err(index) => index,
};
let mut to_remove: Option<usize> = None;
if lower < self.len() && self[lower].0 + FromUsize::from_usize(self[lower].1.len()) == key {
// extend into existing chunk
self[lower].1.push(value);
} else {
// insert a new chunk
let range: Vec<V> = vec![value];
self.insert(lower, (key, range));
};
if lower > 0 {
let next = lower - 1;
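            // (added) Keys are stored in descending order, so index `lower - 1`
            // holds the range that starts above `key`; if it starts at exactly
            // `key + 1`, the two ranges are adjacent and are merged below.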
if next < self.len() {
{
let (mut next, mut inserted) = self.split_at_mut(lower);
let mut next = next.last_mut().unwrap();
let mut inserted = inserted.first_mut().unwrap();
if next.0 == key + FromUsize::from_usize(1) {
inserted.1.append(&mut next.1);
to_remove = Some(lower - 1);
}
}
if let Some(r) = to_remove {
self.remove(r);
}
}
}
}
}
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_range() {
use std::cmp::Ordering;
let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
assert_eq!(ranges.range_iter().next(), None);
assert_eq!(ranges.find_item(&1), None);
assert!(!ranges.have_item(&1));
assert_eq!(ranges.get_tail(&0), 0..0);
ranges.insert_item(17, 'q');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert!(ranges.have_item(&17));
assert_eq!(ranges.get_tail(&17), 17..18);
ranges.insert_item(18, 'r');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&18));
assert_eq!(ranges.get_tail(&17), 17..19);
ranges.insert_item(16, 'p');
assert_eq!(ranges.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&16), Some(&'p'));
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&16));
assert_eq!(ranges.get_tail(&17), 17..19);
assert_eq!(ranges.get_tail(&16), 16..19);
ranges.insert_item(2, 'b');
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&2), Some(&'b'));
ranges.insert_item(3, 'c');
ranges.insert_item(4, 'd');
assert_eq!(ranges.get_tail(&3), 3..5);
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
let mut r = ranges.clone();
r.remove_head(&1);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&2);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&3);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&10);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&5);
assert_eq!(r.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&19);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_tail(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_tail(&17);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
r.remove_tail(&16);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_tail(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_tail(&2);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_from(&18);
assert!(!r.have_item(&18));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal);
r.remove_from(&16);
assert!(!r.have_item(&16));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_from(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_from(&1);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&2);
assert_eq!(r.range_iter().next(), None);
}
|
RangeIterator
|
identifier_name
|
range_collection.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// This module defines a trait for a collection of ranged values and an implementation
/// for this trait over a sorted vector.
use std::ops::{Add, Range, Sub};
pub trait ToUsize {
fn to_usize(&self) -> usize;
}
pub trait FromUsize {
fn from_usize(s: usize) -> Self;
}
/// A key-value collection ordered by key, with sequential key-value pairs grouped together.
/// Such a group is called a range.
/// E.g. a collection of 5 pairs {1, a}, {2, b}, {10, x}, {11, y}, {12, z} will be grouped into two ranges: {1, [a,b]}, {10, [x,y,z]}
pub trait RangeCollection<K, V> {
/// Check if the given key is present in the collection.
fn have_item(&self, key: &K) -> bool;
/// Get value by key.
fn find_item(&self, key: &K) -> Option<&V>;
/// Get a range of keys from `key` till the end of the range that has `key`
    /// Returns an empty range if the key does not exist.
fn get_tail(&mut self, key: &K) -> Range<K>;
/// Remove all elements < `start` in the range that contains `start` - 1
fn remove_head(&mut self, start: &K);
/// Remove all elements >= `start` in the range that contains `start`
fn remove_tail(&mut self, start: &K);
/// Remove all elements >= `start`
fn remove_from(&mut self, start: &K);
    /// Insert a single value at `key`, extending or merging adjacent ranges
fn insert_item(&mut self, key: K, value: V);
/// Get an iterator over ranges
fn range_iter(&self) -> RangeIterator<K, V>;
}
/// Range iterator. For each range, yields the key of the first element of the range and a slice of values.
pub struct RangeIterator<'c, K: 'c, V: 'c> {
range: usize,
collection: &'c Vec<(K, Vec<V>)>,
}
impl<'c, K: 'c, V: 'c> Iterator for RangeIterator<'c, K, V>
where K: Add<Output = K> + FromUsize + ToUsize + Copy,
{
type Item = (K, &'c [V]);
// The 'Iterator' trait only requires the 'next' method to be defined. The
// return type is 'Option<T>', 'None' is returned when the 'Iterator' is
// over, otherwise the next value is returned wrapped in 'Some'
fn next(&mut self) -> Option<(K, &'c [V])> {
if self.range > 0 {
self.range -= 1;
} else {
return None;
}
match self.collection.get(self.range) {
Some(&(ref k, ref vec)) => {
Some((*k, &vec))
}
None => None,
}
}
}
impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)>
where K: Ord + PartialEq + Add<Output = K> + Sub<Output = K> + Copy + FromUsize + ToUsize,
{
fn range_iter(&self) -> RangeIterator<K, V> {
RangeIterator {
range: self.len(),
collection: self,
}
}
fn have_item(&self, key: &K) -> bool {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => true,
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) => k <= key && (*k + FromUsize::from_usize(v.len())) > *key,
_ => false,
},
}
}
fn find_item(&self, key: &K) -> Option<&V> {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => self.get(index).unwrap().1.get(0),
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => v.get((*key - *k).to_usize()),
_ => None,
},
}
}
fn get_tail(&mut self, key: &K) -> Range<K> {
let kv = *key;
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => kv..(kv + FromUsize::from_usize(self[index].1.len())),
Err(index) => {
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
kv..(*k + FromUsize::from_usize(v.len()))
}
_ => kv..kv,
}
}
}
}
/// Remove element key and following elements in the same range
fn remove_tail(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.remove(index);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
/// Remove the element and all following it.
fn remove_from(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.drain(..index + 1);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.drain(..index + 1);
} else {
self.drain(..index);
}
}
}
}
/// Remove range elements up to key
fn remove_head(&mut self, key: &K) {
if *key == FromUsize::from_usize(0) {
return;
}
let prev = *key - FromUsize::from_usize(1);
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => {} //start of range, do nothing.
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref mut k, ref mut v)) if *k <= prev && (*k + FromUsize::from_usize(v.len())) > prev => {
let tail = v.split_off((*key - *k).to_usize());
empty = tail.is_empty();
let removed = ::std::mem::replace(v, tail);
let new_k = *k + FromUsize::from_usize(removed.len());
::std::mem::replace(k, new_k);
}
_ =>
|
}
if empty {
self.remove(index);
}
}
}
}
fn insert_item(&mut self, key: K, value: V) {
assert!(!self.have_item(&key));
// todo: fix warning
let lower = match self.binary_search_by(|&(k, _)| k.cmp(&key).reverse()) {
Ok(index) | Err(index) => index,
};
let mut to_remove: Option<usize> = None;
if lower < self.len() && self[lower].0 + FromUsize::from_usize(self[lower].1.len()) == key {
// extend into existing chunk
self[lower].1.push(value);
} else {
// insert a new chunk
let range: Vec<V> = vec![value];
self.insert(lower, (key, range));
};
if lower > 0 {
let next = lower - 1;
if next < self.len() {
{
let (mut next, mut inserted) = self.split_at_mut(lower);
let mut next = next.last_mut().unwrap();
let mut inserted = inserted.first_mut().unwrap();
if next.0 == key + FromUsize::from_usize(1) {
inserted.1.append(&mut next.1);
to_remove = Some(lower - 1);
}
}
if let Some(r) = to_remove {
self.remove(r);
}
}
}
}
}
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_range() {
use std::cmp::Ordering;
let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
assert_eq!(ranges.range_iter().next(), None);
assert_eq!(ranges.find_item(&1), None);
assert!(!ranges.have_item(&1));
assert_eq!(ranges.get_tail(&0), 0..0);
ranges.insert_item(17, 'q');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert!(ranges.have_item(&17));
assert_eq!(ranges.get_tail(&17), 17..18);
ranges.insert_item(18, 'r');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&18));
assert_eq!(ranges.get_tail(&17), 17..19);
ranges.insert_item(16, 'p');
assert_eq!(ranges.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&16), Some(&'p'));
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&16));
assert_eq!(ranges.get_tail(&17), 17..19);
assert_eq!(ranges.get_tail(&16), 16..19);
ranges.insert_item(2, 'b');
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&2), Some(&'b'));
ranges.insert_item(3, 'c');
ranges.insert_item(4, 'd');
assert_eq!(ranges.get_tail(&3), 3..5);
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
let mut r = ranges.clone();
r.remove_head(&1);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&2);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&3);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&10);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&5);
assert_eq!(r.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&19);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_tail(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_tail(&17);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
r.remove_tail(&16);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_tail(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_tail(&2);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_from(&18);
assert!(!r.have_item(&18));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal);
r.remove_from(&16);
assert!(!r.have_item(&16));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_from(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_from(&1);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&2);
assert_eq!(r.range_iter().next(), None);
}
|
{}
|
conditional_block
|
range_collection.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// This module defines a trait for a collection of ranged values and an implementation
/// for this trait over a sorted vector.
use std::ops::{Add, Range, Sub};
pub trait ToUsize {
fn to_usize(&self) -> usize;
}
pub trait FromUsize {
fn from_usize(s: usize) -> Self;
}
/// A key-value collection ordered by key, with sequential key-value pairs grouped together.
/// Such a group is called a range.
/// E.g. a collection of 5 pairs {1, a}, {2, b}, {10, x}, {11, y}, {12, z} will be grouped into two ranges: {1, [a,b]}, {10, [x,y,z]}
pub trait RangeCollection<K, V> {
/// Check if the given key is present in the collection.
fn have_item(&self, key: &K) -> bool;
/// Get value by key.
fn find_item(&self, key: &K) -> Option<&V>;
/// Get a range of keys from `key` till the end of the range that has `key`
    /// Returns an empty range if the key does not exist.
fn get_tail(&mut self, key: &K) -> Range<K>;
/// Remove all elements < `start` in the range that contains `start` - 1
fn remove_head(&mut self, start: &K);
/// Remove all elements >= `start` in the range that contains `start`
fn remove_tail(&mut self, start: &K);
/// Remove all elements >= `start`
fn remove_from(&mut self, start: &K);
    /// Insert a single value at `key`, extending or merging adjacent ranges
fn insert_item(&mut self, key: K, value: V);
/// Get an iterator over ranges
fn range_iter(&self) -> RangeIterator<K, V>;
}
/// Range iterator. For each range, yields the key of the first element of the range and a slice of values.
pub struct RangeIterator<'c, K: 'c, V: 'c> {
range: usize,
collection: &'c Vec<(K, Vec<V>)>,
}
impl<'c, K: 'c, V: 'c> Iterator for RangeIterator<'c, K, V>
where K: Add<Output = K> + FromUsize + ToUsize + Copy,
{
type Item = (K, &'c [V]);
// The 'Iterator' trait only requires the 'next' method to be defined. The
// return type is 'Option<T>', 'None' is returned when the 'Iterator' is
// over, otherwise the next value is returned wrapped in 'Some'
fn next(&mut self) -> Option<(K, &'c [V])> {
if self.range > 0 {
self.range -= 1;
} else {
return None;
}
match self.collection.get(self.range) {
Some(&(ref k, ref vec)) => {
Some((*k, &vec))
}
None => None,
}
}
}
impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)>
where K: Ord + PartialEq + Add<Output = K> + Sub<Output = K> + Copy + FromUsize + ToUsize,
{
fn range_iter(&self) -> RangeIterator<K, V> {
RangeIterator {
range: self.len(),
collection: self,
}
}
fn have_item(&self, key: &K) -> bool {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => true,
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) => k <= key && (*k + FromUsize::from_usize(v.len())) > *key,
_ => false,
},
}
}
fn find_item(&self, key: &K) -> Option<&V> {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => self.get(index).unwrap().1.get(0),
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => v.get((*key - *k).to_usize()),
_ => None,
},
}
}
fn get_tail(&mut self, key: &K) -> Range<K> {
let kv = *key;
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => kv..(kv + FromUsize::from_usize(self[index].1.len())),
Err(index) => {
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
kv..(*k + FromUsize::from_usize(v.len()))
}
_ => kv..kv,
}
}
}
}
/// Remove element key and following elements in the same range
fn remove_tail(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.remove(index);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
/// Remove the element and all following it.
fn remove_from(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.drain(..index + 1);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.drain(..index + 1);
} else {
self.drain(..index);
}
}
}
}
/// Remove range elements up to key
fn remove_head(&mut self, key: &K) {
if *key == FromUsize::from_usize(0) {
return;
}
let prev = *key - FromUsize::from_usize(1);
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => {} //start of range, do nothing.
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref mut k, ref mut v)) if *k <= prev && (*k + FromUsize::from_usize(v.len())) > prev => {
let tail = v.split_off((*key - *k).to_usize());
empty = tail.is_empty();
let removed = ::std::mem::replace(v, tail);
let new_k = *k + FromUsize::from_usize(removed.len());
::std::mem::replace(k, new_k);
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
fn insert_item(&mut self, key: K, value: V)
|
{
let (mut next, mut inserted) = self.split_at_mut(lower);
let mut next = next.last_mut().unwrap();
let mut inserted = inserted.first_mut().unwrap();
if next.0 == key + FromUsize::from_usize(1) {
inserted.1.append(&mut next.1);
to_remove = Some(lower - 1);
}
}
if let Some(r) = to_remove {
self.remove(r);
}
}
}
}
}
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_range() {
use std::cmp::Ordering;
let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
assert_eq!(ranges.range_iter().next(), None);
assert_eq!(ranges.find_item(&1), None);
assert!(!ranges.have_item(&1));
assert_eq!(ranges.get_tail(&0), 0..0);
ranges.insert_item(17, 'q');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert!(ranges.have_item(&17));
assert_eq!(ranges.get_tail(&17), 17..18);
ranges.insert_item(18, 'r');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&18));
assert_eq!(ranges.get_tail(&17), 17..19);
ranges.insert_item(16, 'p');
assert_eq!(ranges.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&16), Some(&'p'));
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&16));
assert_eq!(ranges.get_tail(&17), 17..19);
assert_eq!(ranges.get_tail(&16), 16..19);
ranges.insert_item(2, 'b');
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&2), Some(&'b'));
ranges.insert_item(3, 'c');
ranges.insert_item(4, 'd');
assert_eq!(ranges.get_tail(&3), 3..5);
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
let mut r = ranges.clone();
r.remove_head(&1);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&2);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&3);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&10);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&5);
assert_eq!(r.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&19);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_tail(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_tail(&17);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
r.remove_tail(&16);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_tail(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_tail(&2);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_from(&18);
assert!(!r.have_item(&18));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal);
r.remove_from(&16);
assert!(!r.have_item(&16));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_from(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_from(&1);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&2);
assert_eq!(r.range_iter().next(), None);
}
|
{
assert!(!self.have_item(&key));
// todo: fix warning
let lower = match self.binary_search_by(|&(k, _)| k.cmp(&key).reverse()) {
Ok(index) | Err(index) => index,
};
let mut to_remove: Option<usize> = None;
if lower < self.len() && self[lower].0 + FromUsize::from_usize(self[lower].1.len()) == key {
// extend into existing chunk
self[lower].1.push(value);
} else {
// insert a new chunk
let range: Vec<V> = vec![value];
self.insert(lower, (key, range));
};
if lower > 0 {
let next = lower - 1;
if next < self.len() {
|
identifier_body
|
range_collection.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// This module defines a trait for a collection of ranged values and an implementation
/// for this trait over sorted vector.
use std::ops::{Add, Range, Sub};
pub trait ToUsize {
fn to_usize(&self) -> usize;
}
pub trait FromUsize {
fn from_usize(s: usize) -> Self;
}
/// A key-value collection ordered by key, with sequential key-value pairs grouped together.
/// Such a group is called a range.
/// E.g. a collection of 5 pairs {1, a}, {2, b}, {10, x}, {11, y}, {12, z} will be grouped into two ranges: {1, [a,b]}, {10, [x,y,z]}
pub trait RangeCollection<K, V> {
/// Check if the given key is present in the collection.
fn have_item(&self, key: &K) -> bool;
/// Get value by key.
fn find_item(&self, key: &K) -> Option<&V>;
/// Get a range of keys from `key` to the end of the range that contains `key`.
/// Returns an empty range if the key does not exist.
fn get_tail(&mut self, key: &K) -> Range<K>;
/// Remove all elements < `start` in the range that contains `start` - 1
fn remove_head(&mut self, start: &K);
/// Remove all elements >= `start` in the range that contains `start`
fn remove_tail(&mut self, start: &K);
/// Remove all elements >= `start`
fn remove_from(&mut self, start: &K);
/// Insert a key-value pair.
fn insert_item(&mut self, key: K, value: V);
/// Get an iterator over ranges
fn range_iter(&self) -> RangeIterator<K, V>;
}
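// A minimal usage sketch, assuming the `Vec<(u64, Vec<char>)>` backing store
// exercised by the test below; the values are illustrative only:
//
//     let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
//     ranges.insert_item(1, 'a');
//     ranges.insert_item(2, 'b');
//     ranges.insert_item(10, 'x');
//     // Two ranges now exist: (1, ['a', 'b']) and (10, ['x']).
//     assert_eq!(ranges.get_tail(&1), 1..3);
//     assert_eq!(ranges.find_item(&2), Some(&'b'));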
/// Range iterator. For each range, yields the key of the first element of the range and a slice of its values.
pub struct RangeIterator<'c, K: 'c, V: 'c> {
range: usize,
collection: &'c Vec<(K, Vec<V>)>,
}
impl<'c, K: 'c, V: 'c> Iterator for RangeIterator<'c, K, V>
where K: Add<Output = K> + FromUsize + ToUsize + Copy,
{
type Item = (K, &'c [V]);
// The 'Iterator' trait only requires the 'next' method to be defined. The
// return type is 'Option<T>', 'None' is returned when the 'Iterator' is
// over, otherwise the next value is returned wrapped in 'Some'
fn next(&mut self) -> Option<(K, &'c [V])> {
if self.range > 0 {
self.range -= 1;
} else {
return None;
}
match self.collection.get(self.range) {
Some(&(ref k, ref vec)) => {
Some((*k, &vec))
}
None => None,
}
}
}
impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)>
where K: Ord + PartialEq + Add<Output = K> + Sub<Output = K> + Copy + FromUsize + ToUsize,
{
fn range_iter(&self) -> RangeIterator<K, V> {
RangeIterator {
range: self.len(),
collection: self,
}
}
fn have_item(&self, key: &K) -> bool {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => true,
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) => k <= key && (*k + FromUsize::from_usize(v.len())) > *key,
_ => false,
},
}
}
fn find_item(&self, key: &K) -> Option<&V> {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => self.get(index).unwrap().1.get(0),
Err(index) => match self.get(index) {
Some(&(ref k, ref v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => v.get((*key - *k).to_usize()),
_ => None,
},
}
}
fn get_tail(&mut self, key: &K) -> Range<K> {
let kv = *key;
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => kv..(kv + FromUsize::from_usize(self[index].1.len())),
Err(index) => {
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
kv..(*k + FromUsize::from_usize(v.len()))
}
_ => kv..kv,
}
}
}
}
/// Remove element key and following elements in the same range
fn remove_tail(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.remove(index);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
/// Remove the element and all following it.
fn remove_from(&mut self, key: &K) {
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(index) => {
self.drain(..index + 1);
}
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => {
v.truncate((*key - *k).to_usize());
empty = v.is_empty();
}
_ => {}
}
if empty {
self.drain(..index + 1);
} else {
self.drain(..index);
}
}
}
}
/// Remove range elements up to key
fn remove_head(&mut self, key: &K) {
if *key == FromUsize::from_usize(0) {
return;
}
let prev = *key - FromUsize::from_usize(1);
match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) {
Ok(_) => {} //start of range, do nothing.
Err(index) => {
let mut empty = false;
match self.get_mut(index) {
Some(&mut (ref mut k, ref mut v)) if *k <= prev && (*k + FromUsize::from_usize(v.len())) > prev => {
let tail = v.split_off((*key - *k).to_usize());
empty = tail.is_empty();
let removed = ::std::mem::replace(v, tail);
let new_k = *k + FromUsize::from_usize(removed.len());
::std::mem::replace(k, new_k);
}
_ => {}
}
if empty {
self.remove(index);
}
}
}
}
fn insert_item(&mut self, key: K, value: V) {
assert!(!self.have_item(&key));
// todo: fix warning
let lower = match self.binary_search_by(|&(k, _)| k.cmp(&key).reverse()) {
Ok(index) | Err(index) => index,
};
let mut to_remove: Option<usize> = None;
if lower < self.len() && self[lower].0 + FromUsize::from_usize(self[lower].1.len()) == key {
// extend into existing chunk
self[lower].1.push(value);
} else {
// insert a new chunk
let range: Vec<V> = vec![value];
self.insert(lower, (key, range));
};
if lower > 0 {
let next = lower - 1;
if next < self.len() {
{
let (mut next, mut inserted) = self.split_at_mut(lower);
let mut next = next.last_mut().unwrap();
let mut inserted = inserted.first_mut().unwrap();
if next.0 == key + FromUsize::from_usize(1) {
inserted.1.append(&mut next.1);
to_remove = Some(lower - 1);
}
}
if let Some(r) = to_remove {
self.remove(r);
}
}
}
}
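// Note on the merge step above (illustrative values): inserting the key that
// bridges two adjacent ranges joins them. With ranges (2, [b, c, d]) and
// (6, [f, g]), insert_item(5, 'e') first extends the lower range to
// (2, [b, c, d, e]); the next range then starts at key + 1 == 6, so its values
// are appended as well, leaving a single range (2, [b, c, d, e, f, g]).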
}
#[test]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_range() {
use std::cmp::Ordering;
let mut ranges: Vec<(u64, Vec<char>)> = Vec::new();
assert_eq!(ranges.range_iter().next(), None);
assert_eq!(ranges.find_item(&1), None);
assert!(!ranges.have_item(&1));
assert_eq!(ranges.get_tail(&0), 0..0);
ranges.insert_item(17, 'q');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert!(ranges.have_item(&17));
assert_eq!(ranges.get_tail(&17), 17..18);
ranges.insert_item(18, 'r');
assert_eq!(ranges.range_iter().cmp(vec![(17, &['q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&18));
assert_eq!(ranges.get_tail(&17), 17..19);
ranges.insert_item(16, 'p');
assert_eq!(ranges.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&16), Some(&'p'));
assert_eq!(ranges.find_item(&17), Some(&'q'));
assert_eq!(ranges.find_item(&18), Some(&'r'));
assert!(ranges.have_item(&16));
assert_eq!(ranges.get_tail(&17), 17..19);
assert_eq!(ranges.get_tail(&16), 16..19);
ranges.insert_item(2, 'b');
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
assert_eq!(ranges.find_item(&2), Some(&'b'));
ranges.insert_item(3, 'c');
ranges.insert_item(4, 'd');
assert_eq!(ranges.get_tail(&3), 3..5);
assert_eq!(ranges.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
let mut r = ranges.clone();
r.remove_head(&1);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&2);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&3);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&10);
assert_eq!(r.range_iter().cmp(vec![(3, &['c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&5);
assert_eq!(r.range_iter().cmp(vec![(16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_head(&19);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_tail(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_tail(&17);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal);
r.remove_tail(&16);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_tail(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_tail(&2);
assert_eq!(r.range_iter().next(), None);
|
assert!(!r.have_item(&18));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal);
r.remove_from(&16);
assert!(!r.have_item(&16));
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal);
r.remove_from(&3);
assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal);
r.remove_from(&1);
assert_eq!(r.range_iter().next(), None);
let mut r = ranges.clone();
r.remove_from(&2);
assert_eq!(r.range_iter().next(), None);
}
|
let mut r = ranges.clone();
r.remove_from(&20);
assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal);
r.remove_from(&18);
|
random_line_split
|
mod.rs
|
// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::octets;
use super::Error;
use super::Result;
use self::table::DECODE_TABLE;
use self::table::ENCODE_TABLE;
pub fn decode(b: &mut octets::Octets) -> Result<Vec<u8>> {
// Max compression ratio is >= 0.5
let mut out = Vec::with_capacity(b.len() << 1);
let mut decoder = Decoder::new();
while b.cap() > 0 {
let byte = b.get_u8()?;
if let Some(b) = decoder.decode4(byte >> 4)? {
out.push(b);
}
if let Some(b) = decoder.decode4(byte & 0xf)? {
out.push(b);
}
}
if !decoder.is_final() {
return Err(Error::InvalidHuffmanEncoding);
}
Ok(out)
}
pub fn encode(src: &[u8], out: &mut octets::OctetsMut, low: bool) -> Result<()> {
let mut bits: u64 = 0;
let mut bits_left = 40;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, code) = ENCODE_TABLE[b as usize];
bits |= code << (bits_left - nbits);
bits_left -= nbits;
while bits_left <= 32 {
out.put_u8((bits >> 32) as u8)?;
bits <<= 8;
bits_left += 8;
}
}
if bits_left != 40 {
// This writes the EOS token
bits |= (1 << bits_left) - 1;
out.put_u8((bits >> 32) as u8)?;
}
Ok(())
}
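// A worked sketch of the final-byte padding above (numbers are illustrative):
// if the Huffman codes for the input total 37 bits, the loop emits 4 full bytes
// and 5 code bits remain pending. The last byte then carries those 5 bits in its
// high positions with the low 3 bits set to 1 (a prefix of the EOS code), which
// is exactly what `bits |= (1 << bits_left) - 1` fills in before the final
// `put_u8`.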
pub fn encode_output_length(src: &[u8], low: bool) -> Result<usize> {
let mut bits: usize = 0;
for &b in src {
let b = if low
|
else { b };
let (nbits, _) = ENCODE_TABLE[b as usize];
bits += nbits;
}
let mut len = bits / 8;
if bits & 7 != 0 {
len += 1;
}
Ok(len)
}
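// The length computed above is just the bit total rounded up to whole bytes:
// reusing the 37-bit example, 37 / 8 == 4 with a remainder, so 5 bytes are
// reported, matching the 4 full bytes plus one padded byte that `encode` emits.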
struct Decoder {
state: usize,
maybe_eos: bool,
}
impl Decoder {
fn new() -> Decoder {
Decoder {
state: 0,
maybe_eos: false,
}
}
// Decodes 4 bits
fn decode4(&mut self, input: u8) -> Result<Option<u8>> {
const MAYBE_EOS: u8 = 1;
const DECODED: u8 = 2;
const ERROR: u8 = 4;
// (next-state, byte, flags)
let (next, byte, flags) = DECODE_TABLE[self.state][input as usize];
if flags & ERROR == ERROR {
// Data followed the EOS marker
return Err(Error::InvalidHuffmanEncoding);
}
let ret = if flags & DECODED == DECODED {
Some(byte)
} else {
None
};
self.state = next;
self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS;
Ok(ret)
}
fn is_final(&self) -> bool {
self.state == 0 || self.maybe_eos
}
}
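// In other words, `decode` drives this table-based state machine one nibble at a
// time: DECODED means a symbol completed on this nibble and `byte` is emitted;
// ERROR means input continued past the EOS marker; MAYBE_EOS (in the usual
// HPACK-style decoder convention) means the bits consumed so far on the current
// partial symbol are all ones, i.e. valid EOS padding, which is why `is_final`
// accepts either the root state or such a state.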
mod table;
|
{ b.to_ascii_lowercase() }
|
conditional_block
|
mod.rs
|
// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::octets;
use super::Error;
use super::Result;
use self::table::DECODE_TABLE;
use self::table::ENCODE_TABLE;
pub fn decode(b: &mut octets::Octets) -> Result<Vec<u8>> {
// Max compression ratio is >= 0.5
let mut out = Vec::with_capacity(b.len() << 1);
let mut decoder = Decoder::new();
while b.cap() > 0 {
let byte = b.get_u8()?;
if let Some(b) = decoder.decode4(byte >> 4)? {
out.push(b);
}
if let Some(b) = decoder.decode4(byte & 0xf)? {
out.push(b);
}
}
if !decoder.is_final() {
return Err(Error::InvalidHuffmanEncoding);
}
Ok(out)
}
pub fn encode(src: &[u8], out: &mut octets::OctetsMut, low: bool) -> Result<()> {
let mut bits: u64 = 0;
let mut bits_left = 40;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, code) = ENCODE_TABLE[b as usize];
bits |= code << (bits_left - nbits);
bits_left -= nbits;
while bits_left <= 32 {
out.put_u8((bits >> 32) as u8)?;
bits <<= 8;
bits_left += 8;
}
}
if bits_left != 40 {
// This writes the EOS token
bits |= (1 << bits_left) - 1;
out.put_u8((bits >> 32) as u8)?;
}
Ok(())
}
pub fn encode_output_length(src: &[u8], low: bool) -> Result<usize> {
let mut bits: usize = 0;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, _) = ENCODE_TABLE[b as usize];
bits += nbits;
}
let mut len = bits / 8;
if bits & 7 != 0 {
len += 1;
}
Ok(len)
}
struct Decoder {
state: usize,
maybe_eos: bool,
}
impl Decoder {
fn new() -> Decoder {
Decoder {
state: 0,
maybe_eos: false,
}
}
// Decodes 4 bits
fn decode4(&mut self, input: u8) -> Result<Option<u8>> {
const MAYBE_EOS: u8 = 1;
const DECODED: u8 = 2;
const ERROR: u8 = 4;
// (next-state, byte, flags)
let (next, byte, flags) = DECODE_TABLE[self.state][input as usize];
if flags & ERROR == ERROR {
// Data followed the EOS marker
return Err(Error::InvalidHuffmanEncoding);
}
let ret = if flags & DECODED == DECODED {
Some(byte)
} else {
None
};
self.state = next;
self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS;
Ok(ret)
}
fn
|
(&self) -> bool {
self.state == 0 || self.maybe_eos
}
}
mod table;
|
is_final
|
identifier_name
|
mod.rs
|
// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::octets;
use super::Error;
use super::Result;
use self::table::DECODE_TABLE;
use self::table::ENCODE_TABLE;
pub fn decode(b: &mut octets::Octets) -> Result<Vec<u8>> {
// Max compression ratio is >= 0.5
let mut out = Vec::with_capacity(b.len() << 1);
let mut decoder = Decoder::new();
while b.cap() > 0 {
let byte = b.get_u8()?;
if let Some(b) = decoder.decode4(byte >> 4)? {
out.push(b);
}
if let Some(b) = decoder.decode4(byte & 0xf)? {
out.push(b);
}
}
if !decoder.is_final() {
return Err(Error::InvalidHuffmanEncoding);
}
Ok(out)
}
pub fn encode(src: &[u8], out: &mut octets::OctetsMut, low: bool) -> Result<()> {
let mut bits: u64 = 0;
let mut bits_left = 40;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, code) = ENCODE_TABLE[b as usize];
bits |= code << (bits_left - nbits);
bits_left -= nbits;
while bits_left <= 32 {
out.put_u8((bits >> 32) as u8)?;
bits <<= 8;
bits_left += 8;
}
}
if bits_left != 40 {
// This writes the EOS token
bits |= (1 << bits_left) - 1;
out.put_u8((bits >> 32) as u8)?;
}
Ok(())
}
pub fn encode_output_length(src: &[u8], low: bool) -> Result<usize> {
let mut bits: usize = 0;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
|
let mut len = bits / 8;
if bits & 7 != 0 {
len += 1;
}
Ok(len)
}
struct Decoder {
state: usize,
maybe_eos: bool,
}
impl Decoder {
fn new() -> Decoder {
Decoder {
state: 0,
maybe_eos: false,
}
}
// Decodes 4 bits
fn decode4(&mut self, input: u8) -> Result<Option<u8>> {
const MAYBE_EOS: u8 = 1;
const DECODED: u8 = 2;
const ERROR: u8 = 4;
// (next-state, byte, flags)
let (next, byte, flags) = DECODE_TABLE[self.state][input as usize];
if flags & ERROR == ERROR {
// Data followed the EOS marker
return Err(Error::InvalidHuffmanEncoding);
}
let ret = if flags & DECODED == DECODED {
Some(byte)
} else {
None
};
self.state = next;
self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS;
Ok(ret)
}
fn is_final(&self) -> bool {
self.state == 0 || self.maybe_eos
}
}
mod table;
|
let (nbits, _) = ENCODE_TABLE[b as usize];
bits += nbits;
}
|
random_line_split
|
mod.rs
|
// Copyright (C) 2019, Cloudflare, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::octets;
use super::Error;
use super::Result;
use self::table::DECODE_TABLE;
use self::table::ENCODE_TABLE;
pub fn decode(b: &mut octets::Octets) -> Result<Vec<u8>> {
// Max compression ratio is >= 0.5
let mut out = Vec::with_capacity(b.len() << 1);
let mut decoder = Decoder::new();
while b.cap() > 0 {
let byte = b.get_u8()?;
if let Some(b) = decoder.decode4(byte >> 4)? {
out.push(b);
}
if let Some(b) = decoder.decode4(byte & 0xf)? {
out.push(b);
}
}
if !decoder.is_final() {
return Err(Error::InvalidHuffmanEncoding);
}
Ok(out)
}
pub fn encode(src: &[u8], out: &mut octets::OctetsMut, low: bool) -> Result<()> {
let mut bits: u64 = 0;
let mut bits_left = 40;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, code) = ENCODE_TABLE[b as usize];
bits |= code << (bits_left - nbits);
bits_left -= nbits;
while bits_left <= 32 {
out.put_u8((bits >> 32) as u8)?;
bits <<= 8;
bits_left += 8;
}
}
if bits_left != 40 {
// This writes the EOS token
bits |= (1 << bits_left) - 1;
out.put_u8((bits >> 32) as u8)?;
}
Ok(())
}
pub fn encode_output_length(src: &[u8], low: bool) -> Result<usize>
|
struct Decoder {
state: usize,
maybe_eos: bool,
}
impl Decoder {
fn new() -> Decoder {
Decoder {
state: 0,
maybe_eos: false,
}
}
// Decodes 4 bits
fn decode4(&mut self, input: u8) -> Result<Option<u8>> {
const MAYBE_EOS: u8 = 1;
const DECODED: u8 = 2;
const ERROR: u8 = 4;
// (next-state, byte, flags)
let (next, byte, flags) = DECODE_TABLE[self.state][input as usize];
if flags & ERROR == ERROR {
// Data followed the EOS marker
return Err(Error::InvalidHuffmanEncoding);
}
let ret = if flags & DECODED == DECODED {
Some(byte)
} else {
None
};
self.state = next;
self.maybe_eos = flags & MAYBE_EOS == MAYBE_EOS;
Ok(ret)
}
fn is_final(&self) -> bool {
self.state == 0 || self.maybe_eos
}
}
mod table;
|
{
let mut bits: usize = 0;
for &b in src {
let b = if low { b.to_ascii_lowercase() } else { b };
let (nbits, _) = ENCODE_TABLE[b as usize];
bits += nbits;
}
let mut len = bits / 8;
if bits & 7 != 0 {
len += 1;
}
Ok(len)
}
|
identifier_body
|
endian.rs
|
use std::marker::PhantomData;
use std::fmt;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use byteorder::{ByteOrder, LittleEndian, BigEndian, NativeEndian};
use uninitialized::uninitialized;
use packed::{Unaligned, Aligned, Packed};
use pod::Pod;
/// A type alias for unaligned little endian primitives
pub type Le<T> = EndianPrimitive<LittleEndian, T>;
/// A type alias for unaligned big endian primitives
pub type Be<T> = EndianPrimitive<BigEndian, T>;
/// A type alias for unaligned native endian primitives
pub type Native<T> = EndianPrimitive<NativeEndian, T>;
/// A POD container for a primitive that stores a value in the specified endianness
/// in memory, and transforms on `get`/`set`
#[repr(C)]
pub struct EndianPrimitive<B, T: EndianConvert> {
value: T::Unaligned,
_phantom: PhantomData<*const B>,
}
impl<B: ByteOrder, T: EndianConvert> EndianPrimitive<B, T> {
/// Creates a new value
#[inline]
pub fn new(v: T) -> Self {
EndianPrimitive {
value: EndianConvert::to::<B>(v),
_phantom: PhantomData,
}
}
/// Transforms to the native value
#[inline]
pub fn get(&self) -> T {
EndianConvert::from::<B>(&self.value)
}
/// Transforms from a native value
#[inline]
pub fn set(&mut self, v: T) {
self.value = EndianConvert::to::<B>(v)
}
/// Gets the inner untransformed value
#[inline]
pub fn raw(&self) -> &T::Unaligned {
&self.value
}
/// A mutable reference to the inner untransformed value
#[inline]
pub fn raw_mut(&mut self) -> &mut T::Unaligned {
&mut self.value
}
}
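// A minimal usage sketch using the aliases above; the exact byte layout behind
// `raw()` depends on the `T::Unaligned` type:
//
//     let mut v: Be<u32> = Be::new(0x1234_5678);
//     assert_eq!(v.get(), 0x1234_5678); // converted back from big-endian storage
//     v.set(0x9abc_def0);               // re-encoded into big-endian bytes
//     let w: Le<u32> = Le::new(1);      // same API, little-endian in memory
//     assert_eq!(w.get(), 1);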
unsafe impl<B, T: EndianConvert> Pod for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Unaligned for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Packed for EndianPrimitive<B, T> { }
impl<B: ByteOrder, T: Default + EndianConvert> Default for EndianPrimitive<B, T> {
#[inline]
fn default() -> Self {
Self::new(Default::default())
}
}
impl<B: ByteOrder, T: EndianConvert> From<T> for EndianPrimitive<B, T> {
#[inline]
fn from(v: T) -> Self {
Self::new(v)
}
}
impl<B: ByteOrder, T: fmt::Debug + EndianConvert> fmt::Debug for EndianPrimitive<B, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&self.get(), f)
}
}
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialEq<RHS>> PartialEq<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
|
fn eq(&self, other: &EndianPrimitive<BRHS, RHS>) -> bool {
self.get().eq(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Eq> Eq for EndianPrimitive<B, T> { }
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialOrd<RHS>> PartialOrd<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn partial_cmp(&self, other: &EndianPrimitive<BRHS, RHS>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Ord> Ord for EndianPrimitive<B, T> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.get().cmp(&other.get())
}
}
impl<B, T: EndianConvert + Hash> Hash for EndianPrimitive<B, T> where T::Unaligned: Hash {
#[inline]
fn hash<H: Hasher>(&self, h: &mut H) {
self.value.hash(h)
}
}
impl<B, T: EndianConvert> Clone for EndianPrimitive<B, T> {
#[inline]
fn clone(&self) -> Self {
EndianPrimitive {
value: self.value.clone(),
_phantom: PhantomData,
}
}
}
impl<B, T: EndianConvert> Copy for EndianPrimitive<B, T> { }
/// Describes a value that can be converted to and from a specified byte order.
pub trait EndianConvert: Aligned {
/// Converts a value from `B`
fn from<B: ByteOrder>(&Self::Unaligned) -> Self;
/// Converts a value to `B`
fn to<B: ByteOrder>(self) -> Self::Unaligned;
}
macro_rules! endian_impl {
($t:ty: $s:expr => $r:ident, $w:ident) => {
impl EndianConvert for $t {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
B::$r(s)
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
let mut s: Self::Unaligned = unsafe { uninitialized() };
B::$w(&mut s, self);
s
}
}
};
}
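// Each invocation below expands to an `EndianConvert` impl for the named type:
// e.g. `endian_impl!(u32: 4 => read_u32, write_u32)` yields a `from` that calls
// `B::read_u32` on the unaligned buffer and a `to` that fills one via
// `B::write_u32`. The size expression (`4` here) is matched by the macro but is
// not referenced in its body.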
endian_impl!(u16: 2 => read_u16, write_u16);
endian_impl!(i16: 2 => read_i16, write_i16);
endian_impl!(i32: 4 => read_i32, write_i32);
endian_impl!(u32: 4 => read_u32, write_u32);
endian_impl!(i64: 8 => read_i64, write_i64);
endian_impl!(u64: 8 => read_u64, write_u64);
endian_impl!(f32: 4 => read_f32, write_f32);
endian_impl!(f64: 8 => read_f64, write_f64);
impl EndianConvert for bool {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
*s as u8 != 0
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
if self as u8 != 0 { true } else { false }
}
}
#[test]
fn endian_size() {
use std::mem::size_of;
use std::mem::align_of;
type B = NativeEndian;
assert_eq!(size_of::<EndianPrimitive<B, i16>>(), 2);
assert_eq!(size_of::<EndianPrimitive<B, i32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, i64>>(), 8);
assert_eq!(size_of::<EndianPrimitive<B, f32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, f64>>(), 8);
assert_eq!(align_of::<EndianPrimitive<B, bool>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i16>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i64>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f64>>(), 1);
}
|
#[inline]
|
random_line_split
|
endian.rs
|
use std::marker::PhantomData;
use std::fmt;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use byteorder::{ByteOrder, LittleEndian, BigEndian, NativeEndian};
use uninitialized::uninitialized;
use packed::{Unaligned, Aligned, Packed};
use pod::Pod;
/// A type alias for unaligned little endian primitives
pub type Le<T> = EndianPrimitive<LittleEndian, T>;
/// A type alias for unaligned big endian primitives
pub type Be<T> = EndianPrimitive<BigEndian, T>;
/// A type alias for unaligned native endian primitives
pub type Native<T> = EndianPrimitive<NativeEndian, T>;
/// A POD container for a primitive that stores a value in the specified endianness
/// in memory, and transforms on `get`/`set`
#[repr(C)]
pub struct EndianPrimitive<B, T: EndianConvert> {
value: T::Unaligned,
_phantom: PhantomData<*const B>,
}
impl<B: ByteOrder, T: EndianConvert> EndianPrimitive<B, T> {
/// Creates a new value
#[inline]
pub fn new(v: T) -> Self {
EndianPrimitive {
value: EndianConvert::to::<B>(v),
_phantom: PhantomData,
}
}
/// Transforms to the native value
#[inline]
pub fn get(&self) -> T {
EndianConvert::from::<B>(&self.value)
}
/// Transforms from a native value
#[inline]
pub fn set(&mut self, v: T) {
self.value = EndianConvert::to::<B>(v)
}
/// Gets the inner untransformed value
#[inline]
pub fn raw(&self) -> &T::Unaligned {
&self.value
}
/// A mutable reference to the inner untransformed value
#[inline]
pub fn raw_mut(&mut self) -> &mut T::Unaligned {
&mut self.value
}
}
unsafe impl<B, T: EndianConvert> Pod for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Unaligned for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Packed for EndianPrimitive<B, T> { }
impl<B: ByteOrder, T: Default + EndianConvert> Default for EndianPrimitive<B, T> {
#[inline]
fn default() -> Self {
Self::new(Default::default())
}
}
impl<B: ByteOrder, T: EndianConvert> From<T> for EndianPrimitive<B, T> {
#[inline]
fn from(v: T) -> Self {
Self::new(v)
}
}
impl<B: ByteOrder, T: fmt::Debug + EndianConvert> fmt::Debug for EndianPrimitive<B, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&self.get(), f)
}
}
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialEq<RHS>> PartialEq<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn eq(&self, other: &EndianPrimitive<BRHS, RHS>) -> bool {
self.get().eq(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Eq> Eq for EndianPrimitive<B, T> { }
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialOrd<RHS>> PartialOrd<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn partial_cmp(&self, other: &EndianPrimitive<BRHS, RHS>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Ord> Ord for EndianPrimitive<B, T> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.get().cmp(&other.get())
}
}
impl<B, T: EndianConvert + Hash> Hash for EndianPrimitive<B, T> where T::Unaligned: Hash {
#[inline]
fn hash<H: Hasher>(&self, h: &mut H) {
self.value.hash(h)
}
}
impl<B, T: EndianConvert> Clone for EndianPrimitive<B, T> {
#[inline]
fn clone(&self) -> Self {
EndianPrimitive {
value: self.value.clone(),
_phantom: PhantomData,
}
}
}
impl<B, T: EndianConvert> Copy for EndianPrimitive<B, T> { }
/// Describes a value that can be converted to and from a specified byte order.
pub trait EndianConvert: Aligned {
/// Converts a value from `B`
fn from<B: ByteOrder>(&Self::Unaligned) -> Self;
/// Converts a value to `B`
fn to<B: ByteOrder>(self) -> Self::Unaligned;
}
macro_rules! endian_impl {
($t:ty: $s:expr => $r:ident, $w:ident) => {
impl EndianConvert for $t {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
B::$r(s)
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
let mut s: Self::Unaligned = unsafe { uninitialized() };
B::$w(&mut s, self);
s
}
}
};
}
endian_impl!(u16: 2 => read_u16, write_u16);
endian_impl!(i16: 2 => read_i16, write_i16);
endian_impl!(i32: 4 => read_i32, write_i32);
endian_impl!(u32: 4 => read_u32, write_u32);
endian_impl!(i64: 8 => read_i64, write_i64);
endian_impl!(u64: 8 => read_u64, write_u64);
endian_impl!(f32: 4 => read_f32, write_f32);
endian_impl!(f64: 8 => read_f64, write_f64);
impl EndianConvert for bool {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
*s as u8 != 0
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
if self as u8 != 0
|
else { false }
}
}
#[test]
fn endian_size() {
use std::mem::size_of;
use std::mem::align_of;
type B = NativeEndian;
assert_eq!(size_of::<EndianPrimitive<B, i16>>(), 2);
assert_eq!(size_of::<EndianPrimitive<B, i32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, i64>>(), 8);
assert_eq!(size_of::<EndianPrimitive<B, f32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, f64>>(), 8);
assert_eq!(align_of::<EndianPrimitive<B, bool>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i16>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i64>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f64>>(), 1);
}
|
{ true }
|
conditional_block
|
endian.rs
|
use std::marker::PhantomData;
use std::fmt;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use byteorder::{ByteOrder, LittleEndian, BigEndian, NativeEndian};
use uninitialized::uninitialized;
use packed::{Unaligned, Aligned, Packed};
use pod::Pod;
/// A type alias for unaligned little endian primitives
pub type Le<T> = EndianPrimitive<LittleEndian, T>;
/// A type alias for unaligned big endian primitives
pub type Be<T> = EndianPrimitive<BigEndian, T>;
/// A type alias for unaligned native endian primitives
pub type Native<T> = EndianPrimitive<NativeEndian, T>;
/// A POD container for a primitive that stores a value in the specified endianness
/// in memory, and transforms on `get`/`set`
#[repr(C)]
pub struct EndianPrimitive<B, T: EndianConvert> {
value: T::Unaligned,
_phantom: PhantomData<*const B>,
}
impl<B: ByteOrder, T: EndianConvert> EndianPrimitive<B, T> {
/// Creates a new value
#[inline]
pub fn new(v: T) -> Self {
EndianPrimitive {
value: EndianConvert::to::<B>(v),
_phantom: PhantomData,
}
}
/// Transforms to the native value
#[inline]
pub fn get(&self) -> T {
EndianConvert::from::<B>(&self.value)
}
/// Transforms from a native value
#[inline]
pub fn set(&mut self, v: T) {
self.value = EndianConvert::to::<B>(v)
}
/// Gets the inner untransformed value
#[inline]
pub fn
|
(&self) -> &T::Unaligned {
&self.value
}
/// A mutable reference to the inner untransformed value
#[inline]
pub fn raw_mut(&mut self) -> &mut T::Unaligned {
&mut self.value
}
}
unsafe impl<B, T: EndianConvert> Pod for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Unaligned for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Packed for EndianPrimitive<B, T> { }
impl<B: ByteOrder, T: Default + EndianConvert> Default for EndianPrimitive<B, T> {
#[inline]
fn default() -> Self {
Self::new(Default::default())
}
}
impl<B: ByteOrder, T: EndianConvert> From<T> for EndianPrimitive<B, T> {
#[inline]
fn from(v: T) -> Self {
Self::new(v)
}
}
impl<B: ByteOrder, T: fmt::Debug + EndianConvert> fmt::Debug for EndianPrimitive<B, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&self.get(), f)
}
}
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialEq<RHS>> PartialEq<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn eq(&self, other: &EndianPrimitive<BRHS, RHS>) -> bool {
self.get().eq(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Eq> Eq for EndianPrimitive<B, T> { }
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialOrd<RHS>> PartialOrd<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn partial_cmp(&self, other: &EndianPrimitive<BRHS, RHS>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Ord> Ord for EndianPrimitive<B, T> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.get().cmp(&other.get())
}
}
impl<B, T: EndianConvert + Hash> Hash for EndianPrimitive<B, T> where T::Unaligned: Hash {
#[inline]
fn hash<H: Hasher>(&self, h: &mut H) {
self.value.hash(h)
}
}
impl<B, T: EndianConvert> Clone for EndianPrimitive<B, T> {
#[inline]
fn clone(&self) -> Self {
EndianPrimitive {
value: self.value.clone(),
_phantom: PhantomData,
}
}
}
impl<B, T: EndianConvert> Copy for EndianPrimitive<B, T> { }
/// Describes a value that can be converted to and from a specified byte order.
pub trait EndianConvert: Aligned {
/// Converts a value from `B`
fn from<B: ByteOrder>(&Self::Unaligned) -> Self;
/// Converts a value to `B`
fn to<B: ByteOrder>(self) -> Self::Unaligned;
}
macro_rules! endian_impl {
($t:ty: $s:expr => $r:ident, $w:ident) => {
impl EndianConvert for $t {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
B::$r(s)
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
let mut s: Self::Unaligned = unsafe { uninitialized() };
B::$w(&mut s, self);
s
}
}
};
}
endian_impl!(u16: 2 => read_u16, write_u16);
endian_impl!(i16: 2 => read_i16, write_i16);
endian_impl!(i32: 4 => read_i32, write_i32);
endian_impl!(u32: 4 => read_u32, write_u32);
endian_impl!(i64: 8 => read_i64, write_i64);
endian_impl!(u64: 8 => read_u64, write_u64);
endian_impl!(f32: 4 => read_f32, write_f32);
endian_impl!(f64: 8 => read_f64, write_f64);
impl EndianConvert for bool {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
*s as u8 != 0
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
if self as u8 != 0 { true } else { false }
}
}
#[test]
fn endian_size() {
use std::mem::size_of;
use std::mem::align_of;
type B = NativeEndian;
assert_eq!(size_of::<EndianPrimitive<B, i16>>(), 2);
assert_eq!(size_of::<EndianPrimitive<B, i32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, i64>>(), 8);
assert_eq!(size_of::<EndianPrimitive<B, f32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, f64>>(), 8);
assert_eq!(align_of::<EndianPrimitive<B, bool>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i16>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i64>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f64>>(), 1);
}
|
raw
|
identifier_name
|
endian.rs
|
use std::marker::PhantomData;
use std::fmt;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use byteorder::{ByteOrder, LittleEndian, BigEndian, NativeEndian};
use uninitialized::uninitialized;
use packed::{Unaligned, Aligned, Packed};
use pod::Pod;
/// A type alias for unaligned little endian primitives
pub type Le<T> = EndianPrimitive<LittleEndian, T>;
/// A type alias for unaligned big endian primitives
pub type Be<T> = EndianPrimitive<BigEndian, T>;
/// A type alias for unaligned native endian primitives
pub type Native<T> = EndianPrimitive<NativeEndian, T>;
/// A POD container for a primitive that stores a value in the specified endianness
/// in memory, and transforms on `get`/`set`
#[repr(C)]
pub struct EndianPrimitive<B, T: EndianConvert> {
value: T::Unaligned,
_phantom: PhantomData<*const B>,
}
impl<B: ByteOrder, T: EndianConvert> EndianPrimitive<B, T> {
/// Creates a new value
#[inline]
pub fn new(v: T) -> Self {
EndianPrimitive {
value: EndianConvert::to::<B>(v),
_phantom: PhantomData,
}
}
/// Transforms to the native value
#[inline]
pub fn get(&self) -> T {
EndianConvert::from::<B>(&self.value)
}
/// Transforms from a native value
#[inline]
pub fn set(&mut self, v: T) {
self.value = EndianConvert::to::<B>(v)
}
/// Gets the inner untransformed value
#[inline]
pub fn raw(&self) -> &T::Unaligned {
&self.value
}
/// A mutable reference to the inner untransformed value
#[inline]
pub fn raw_mut(&mut self) -> &mut T::Unaligned {
&mut self.value
}
}
unsafe impl<B, T: EndianConvert> Pod for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Unaligned for EndianPrimitive<B, T> { }
unsafe impl<B, T: EndianConvert> Packed for EndianPrimitive<B, T> { }
impl<B: ByteOrder, T: Default + EndianConvert> Default for EndianPrimitive<B, T> {
#[inline]
fn default() -> Self {
Self::new(Default::default())
}
}
impl<B: ByteOrder, T: EndianConvert> From<T> for EndianPrimitive<B, T> {
#[inline]
fn from(v: T) -> Self {
Self::new(v)
}
}
impl<B: ByteOrder, T: fmt::Debug + EndianConvert> fmt::Debug for EndianPrimitive<B, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<T as fmt::Debug>::fmt(&self.get(), f)
}
}
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialEq<RHS>> PartialEq<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn eq(&self, other: &EndianPrimitive<BRHS, RHS>) -> bool
|
}
impl<B: ByteOrder, T: EndianConvert + Eq> Eq for EndianPrimitive<B, T> { }
impl<BRHS: ByteOrder, RHS: EndianConvert, B: ByteOrder, T: EndianConvert + PartialOrd<RHS>> PartialOrd<EndianPrimitive<BRHS, RHS>> for EndianPrimitive<B, T> {
#[inline]
fn partial_cmp(&self, other: &EndianPrimitive<BRHS, RHS>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}
}
impl<B: ByteOrder, T: EndianConvert + Ord> Ord for EndianPrimitive<B, T> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.get().cmp(&other.get())
}
}
impl<B, T: EndianConvert + Hash> Hash for EndianPrimitive<B, T> where T::Unaligned: Hash {
#[inline]
fn hash<H: Hasher>(&self, h: &mut H) {
self.value.hash(h)
}
}
impl<B, T: EndianConvert> Clone for EndianPrimitive<B, T> {
#[inline]
fn clone(&self) -> Self {
EndianPrimitive {
value: self.value.clone(),
_phantom: PhantomData,
}
}
}
impl<B, T: EndianConvert> Copy for EndianPrimitive<B, T> { }
/// Describes a value that can be converted to and from a specified byte order.
pub trait EndianConvert: Aligned {
/// Converts a value from `B`
fn from<B: ByteOrder>(&Self::Unaligned) -> Self;
/// Converts a value to `B`
fn to<B: ByteOrder>(self) -> Self::Unaligned;
}
macro_rules! endian_impl {
($t:ty: $s:expr => $r:ident, $w:ident) => {
impl EndianConvert for $t {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
B::$r(s)
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
let mut s: Self::Unaligned = unsafe { uninitialized() };
B::$w(&mut s, self);
s
}
}
};
}
endian_impl!(u16: 2 => read_u16, write_u16);
endian_impl!(i16: 2 => read_i16, write_i16);
endian_impl!(i32: 4 => read_i32, write_i32);
endian_impl!(u32: 4 => read_u32, write_u32);
endian_impl!(i64: 8 => read_i64, write_i64);
endian_impl!(u64: 8 => read_u64, write_u64);
endian_impl!(f32: 4 => read_f32, write_f32);
endian_impl!(f64: 8 => read_f64, write_f64);
impl EndianConvert for bool {
#[inline]
fn from<B: ByteOrder>(s: &Self::Unaligned) -> Self {
*s as u8 != 0
}
#[inline]
fn to<B: ByteOrder>(self) -> Self::Unaligned {
if self as u8 != 0 { true } else { false }
}
}
#[test]
fn endian_size() {
use std::mem::size_of;
use std::mem::align_of;
type B = NativeEndian;
assert_eq!(size_of::<EndianPrimitive<B, i16>>(), 2);
assert_eq!(size_of::<EndianPrimitive<B, i32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, i64>>(), 8);
assert_eq!(size_of::<EndianPrimitive<B, f32>>(), 4);
assert_eq!(size_of::<EndianPrimitive<B, f64>>(), 8);
assert_eq!(align_of::<EndianPrimitive<B, bool>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i16>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, i64>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f32>>(), 1);
assert_eq!(align_of::<EndianPrimitive<B, f64>>(), 1);
}
|
{
self.get().eq(&other.get())
}
|
identifier_body
|
deriving-via-extension-struct-tuple.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Debug)]
struct Foo(isize, isize, String);
pub fn
|
() {
let a1 = Foo(5, 6, "abc".to_string());
let a2 = Foo(5, 6, "abc".to_string());
let b = Foo(5, 7, "def".to_string());
assert_eq!(a1, a1);
assert_eq!(a2, a1);
assert!(!(a1 == b));
assert!(a1 != b);
assert!(!(a1 != a1));
assert!(!(a2 != a1));
}
|
main
|
identifier_name
|
deriving-via-extension-struct-tuple.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Debug)]
struct Foo(isize, isize, String);
pub fn main() {
let a1 = Foo(5, 6, "abc".to_string());
let a2 = Foo(5, 6, "abc".to_string());
let b = Foo(5, 7, "def".to_string());
assert_eq!(a1, a1);
assert_eq!(a2, a1);
|
assert!(!(a2 != a1));
}
|
assert!(!(a1 == b));
assert!(a1 != b);
assert!(!(a1 != a1));
|
random_line_split
|
array2d.rs
|
#[allow(dead_code)]
use std::ops;
use std::fmt;
use std::vec;
use std::slice;
#[derive(Clone)]
/// Two dimensional array
pub struct Array2D<T> {
data: Vec<Vec<T>>,
dim: (usize, usize)
}
impl<T> Array2D<T> where T: Default + Clone {
/// Initialize dim-sized array of zeros (default value)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let a: Array2D<u8> = Array2D::zeros((3, 4));
/// println!("a = {:?}", a);
/// // a = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
/// ```
pub fn zeros(dim: (usize, usize)) -> Array2D<T> {
let (n, m) = dim;
let mut data = vec![Vec::new(); m];
for item in data.iter_mut() {
*item = vec![T::default(); n];
}
Array2D { data: data, dim: dim }
}
}
impl<T> Array2D<T> {
/// Size of Array2D (row, column)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let m: Array2D<u8> = Array2D::zeros((3, 4));
/// assert_eq!(m.dim(), (3, 4));
/// ```
pub fn dim(&self) -> (usize, usize) {
self.dim
}
/// Performs the conversion from Array2D to Vec
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let res = m.to_vec();
/// assert_eq!(res, vec_obj);
/// ```
pub fn
|
(self) -> Vec<Vec<T>> {
self.data
}
}
impl<T> From<Vec<Vec<T>>> for Array2D<T> {
/// Performs the conversion from Vec to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6],
/// vec![7, 8, 9]
/// ];
/// let m = Array2D::from(vec_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]");
/// ```
fn from(v: Vec<Vec<T>>) -> Self {
let dim = (v.len(), v[0].len());
Array2D { data: v, dim: dim }
}
}
impl<'a, T: Clone> From<&'a [&'a [T]]> for Array2D<T> {
/// Performs the conversion from slice to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6]]");
/// ```
fn from(v: &'a [&'a [T]]) -> Self {
let dim = (v.len(), v[0].len());
let mut data = vec![Vec::new(); dim.0];
for (m, e) in data.iter_mut().zip(v) {
*m = Vec::from(*e);
}
Array2D { data: data, dim: dim }
}
}
impl<T> ops::Index<(usize, usize)> for Array2D<T> {
type Output = T;
fn index(&self, index: (usize, usize)) -> &Self::Output {
&(self.data[index.0])[index.1]
}
}
impl<T> ops::IndexMut<(usize, usize)> for Array2D<T> {
fn index_mut(&mut self, index: (usize, usize)) -> &mut T {
&mut (self.data[index.0])[index.1]
}
}
impl<T> fmt::Debug for Array2D<T> where T: fmt::Debug {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.data)
}
}
impl<T> IntoIterator for Array2D<T> {
type Item = Vec<T>;
type IntoIter = vec::IntoIter<Vec<T>>;
fn into_iter(self) -> Self::IntoIter {
self.data.into_iter()
}
}
impl<'a, T> IntoIterator for &'a Array2D<T> {
type Item = &'a Vec<T>;
type IntoIter = slice::Iter<'a, Vec<T>>;
fn into_iter(self) -> Self::IntoIter {
self.data.iter()
}
}
// TODO: rewrite it
impl<'a, T> Array2D<T> {
/// Returns an iterator over the slice.
pub fn iter(&'a self) -> slice::Iter<'a, Vec<T>> {
self.data.iter()
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&'a mut self) -> slice::IterMut<'a, Vec<T>> {
self.data.iter_mut()
}
}
|
to_vec
|
identifier_name
|
array2d.rs
|
#[allow(dead_code)]
use std::ops;
use std::fmt;
use std::vec;
use std::slice;
#[derive(Clone)]
/// Two dimensional array
pub struct Array2D<T> {
data: Vec<Vec<T>>,
dim: (usize, usize)
}
impl<T> Array2D<T> where T: Default + Clone {
/// Initialize dim-sized array of zeros (default value)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let a: Array2D<u8> = Array2D::zeros((3, 4));
/// println!("a = {:?}", a);
/// // a = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
/// ```
pub fn zeros(dim: (usize, usize)) -> Array2D<T> {
let (n, m) = dim;
let mut data = vec![Vec::new(); m];
for item in data.iter_mut() {
*item = vec![T::default(); n];
}
Array2D { data: data, dim: dim }
}
}
impl<T> Array2D<T> {
/// Size of Array2D (row, column)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let m: Array2D<u8> = Array2D::zeros((3, 4));
/// assert_eq!(m.dim(), (3, 4));
/// ```
pub fn dim(&self) -> (usize, usize) {
self.dim
}
/// Performs the conversion from Array2D to Vec
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let res = m.to_vec();
/// assert_eq!(res, vec_obj);
/// ```
pub fn to_vec(self) -> Vec<Vec<T>> {
self.data
}
}
impl<T> From<Vec<Vec<T>>> for Array2D<T> {
/// Performs the conversion from Vec to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6],
/// vec![7, 8, 9]
/// ];
/// let m = Array2D::from(vec_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]");
/// ```
fn from(v: Vec<Vec<T>>) -> Self {
let dim = (v.len(), v[0].len());
Array2D { data: v, dim: dim }
}
}
impl<'a, T: Clone> From<&'a [&'a [T]]> for Array2D<T> {
/// Performs the conversion from slice to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6]]");
/// ```
fn from(v: &'a [&'a [T]]) -> Self {
let dim = (v.len(), v[0].len());
let mut data = vec![Vec::new(); dim.0];
for (m, e) in data.iter_mut().zip(v) {
*m = Vec::from(*e);
}
Array2D { data: data, dim: dim }
}
}
impl<T> ops::Index<(usize, usize)> for Array2D<T> {
type Output = T;
fn index(&self, index: (usize, usize)) -> &Self::Output {
&(self.data[index.0])[index.1]
}
}
impl<T> ops::IndexMut<(usize, usize)> for Array2D<T> {
fn index_mut(&mut self, index: (usize, usize)) -> &mut T {
&mut (self.data[index.0])[index.1]
}
}
impl<T> fmt::Debug for Array2D<T> where T: fmt::Debug {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.data)
}
}
impl<T> IntoIterator for Array2D<T> {
type Item = Vec<T>;
type IntoIter = vec::IntoIter<Vec<T>>;
fn into_iter(self) -> Self::IntoIter {
self.data.into_iter()
}
}
impl<'a, T> IntoIterator for &'a Array2D<T> {
type Item = &'a Vec<T>;
type IntoIter = slice::Iter<'a, Vec<T>>;
fn into_iter(self) -> Self::IntoIter {
self.data.iter()
}
}
// TODO: rewrite it
impl<'a, T> Array2D<T> {
/// Returns an iterator over the slice.
pub fn iter(&'a self) -> slice::Iter<'a, Vec<T>> {
self.data.iter()
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&'a mut self) -> slice::IterMut<'a, Vec<T>>
|
}
|
{
self.data.iter_mut()
}
|
identifier_body
|
array2d.rs
|
#[allow(dead_code)]
use std::ops;
use std::fmt;
use std::vec;
use std::slice;
#[derive(Clone)]
/// Two dimensional array
pub struct Array2D<T> {
data: Vec<Vec<T>>,
dim: (usize, usize)
}
impl<T> Array2D<T> where T: Default + Clone {
/// Initialize dim-sized array of zeros (default value)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let a: Array2D<u8> = Array2D::zeros((3, 4));
/// println!("a = {:?}", a);
/// // a = [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]
/// ```
pub fn zeros(dim: (usize, usize)) -> Array2D<T> {
let (n, m) = dim;
let mut data = vec![Vec::new(); m];
for item in data.iter_mut() {
*item = vec![T::default(); n];
}
Array2D { data: data, dim: dim }
}
}
impl<T> Array2D<T> {
/// Size of Array2D (row, column)
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let m: Array2D<u8> = Array2D::zeros((3, 4));
/// assert_eq!(m.dim(), (3, 4));
/// ```
pub fn dim(&self) -> (usize, usize) {
self.dim
}
/// Performs the conversion from Array2D to Vec
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let res = m.to_vec();
/// assert_eq!(res, vec_obj);
/// ```
pub fn to_vec(self) -> Vec<Vec<T>> {
self.data
}
}
impl<T> From<Vec<Vec<T>>> for Array2D<T> {
/// Performs the conversion from Vec to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let vec_obj = vec![
/// vec![1, 2, 3],
/// vec![4, 5, 6],
/// vec![7, 8, 9]
/// ];
/// let m = Array2D::from(vec_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]");
/// ```
fn from(v: Vec<Vec<T>>) -> Self {
let dim = (v.len(), v[0].len());
Array2D { data: v, dim: dim }
}
}
impl<'a, T: Clone> From<&'a [&'a [T]]> for Array2D<T> {
/// Performs the conversion from slice to Array2D
///
/// # Example
/// ```
/// use arrays::Array2D;
///
/// let slice_obj: &[&[_]] = &[
/// &[1, 2, 3],
/// &[4, 5, 6]
/// ];
/// let m = Array2D::from(slice_obj);
/// let r = format!("m = {:?}", m);
/// assert_eq!(r, "m = [[1, 2, 3], [4, 5, 6]]");
/// ```
fn from(v: &'a [&'a [T]]) -> Self {
let dim = (v.len(), v[0].len());
let mut data = vec![Vec::new(); dim.0];
for (m, e) in data.iter_mut().zip(v) {
*m = Vec::from(*e);
}
Array2D { data: data, dim: dim }
}
}
impl<T> ops::Index<(usize, usize)> for Array2D<T> {
type Output = T;
fn index(&self, index: (usize, usize)) -> &Self::Output {
&(self.data[index.0])[index.1]
}
}
impl<T> ops::IndexMut<(usize, usize)> for Array2D<T> {
fn index_mut(&mut self, index: (usize, usize)) -> &mut T {
&mut (self.data[index.0])[index.1]
}
}
impl<T> fmt::Debug for Array2D<T> where T: fmt::Debug {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.data)
}
}
impl<T> IntoIterator for Array2D<T> {
type Item = Vec<T>;
type IntoIter = vec::IntoIter<Vec<T>>;
fn into_iter(self) -> Self::IntoIter {
self.data.into_iter()
}
}
impl<'a, T> IntoIterator for &'a Array2D<T> {
type Item = &'a Vec<T>;
type IntoIter = slice::Iter<'a, Vec<T>>;
|
fn into_iter(self) -> Self::IntoIter {
self.data.iter()
}
}
// TODO: rewrite it
impl<'a, T> Array2D<T> {
/// Returns an iterator over the slice.
pub fn iter(&'a self) -> slice::Iter<'a, Vec<T>> {
self.data.iter()
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&'a mut self) -> slice::IterMut<'a, Vec<T>> {
self.data.iter_mut()
}
}
|
random_line_split
|
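Before the next file, a minimal usage sketch of the Array2D API defined in the array2d.rs entries above (zeros, From, (row, column) indexing, and the two IntoIterator impls). It assumes the code is compiled as the `arrays` crate named in its own doc examples; everything else comes straight from the source.
```rust
use arrays::Array2D;

fn main() {
    // Allocate a zero-filled array and set one cell via (row, column) indexing.
    let mut grid: Array2D<i32> = Array2D::zeros((2, 3));
    grid[(0, 1)] = 7;
    assert_eq!(grid[(0, 1)], 7);

    // Convert an existing nested Vec into an Array2D; dim is (rows, columns).
    let m = Array2D::from(vec![vec![1, 2, 3], vec![4, 5, 6]]);
    assert_eq!(m.dim(), (2, 3));

    // Borrowing iteration walks the rows without consuming the array.
    for row in &m {
        println!("{:?}", row);
    }

    // IntoIterator by value yields owned rows.
    let rows: Vec<Vec<i32>> = m.into_iter().collect();
    assert_eq!(rows.len(), 2);
}
```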
|
types.rs
|
#[derive(Clone, Serialize)]
pub struct ClanStats {
pub tag: String,
pub name: String,
pub color: String,
pub members: usize,
pub description: String,
pub motto: String,
pub id: String,
pub emblems: ClanStatsEmblems,
pub blocked: bool,
pub stats: HistoryClanStats,
}
impl ClanStats {
pub fn empty() -> Self {
Self {
tag: String::new(),
name: String::new(),
color: String::new(),
members: 0,
description: String::new(),
motto: String::new(),
id: String::new(),
emblems: ClanStatsEmblems::empty(),
blocked: true,
stats: HistoryClanStats::empty(),
}
}
}
#[derive(Clone, Serialize)]
pub struct ClanStatsEmblems {
pub x256_wowp: String,
pub x195_portal: String,
pub x64_portal: String,
pub x64_wot: String,
pub x32_portal: String,
pub x24_portal: String,
}
impl ClanStatsEmblems {
pub fn empty() -> Self {
Self {
x256_wowp: String::new(),
x195_portal: String::new(),
x64_portal: String::new(),
x64_wot: String::new(),
x32_portal: String::new(),
x24_portal: String::new(),
}
}
}
// HistoryClanStats is what is expected to come back per clan from the database when requesting old clan data
#[derive(Clone, Serialize)]
pub struct HistoryClanStats {
pub tag: String,
pub name: String,
pub id: String,
pub members: usize,
pub battles: Option<f64>,
|
pub fb_elo10: Option<f64>,
pub fb_elo8: Option<f64>,
pub fb_elo6: Option<f64>,
pub fb_elo: Option<f64>,
pub gm_elo10: Option<f64>,
pub gm_elo8: Option<f64>,
pub gm_elo6: Option<f64>,
pub gm_elo: Option<f64>,
pub glob_rating: Option<f64>,
pub glob_rating_weighted: Option<f64>,
pub win_ratio: Option<f64>,
pub v10l: Option<f64>,
}
impl HistoryClanStats {
pub fn empty() -> Self {
Self {
tag: String::new(),
name: String::new(),
id: String::new(),
members: 0,
battles: None,
daily_battles: None,
efficiency: None,
fb_elo10: None,
fb_elo8: None,
fb_elo6: None,
fb_elo: None,
gm_elo10: None,
gm_elo8: None,
gm_elo6: None,
gm_elo: None,
glob_rating: None,
glob_rating_weighted: None,
win_ratio: None,
v10l: None,
}
}
}
// HistoryCollectionItem is the contents of one history collection item
#[derive(Clone, Serialize)]
pub struct HistoryCollectionItem {
pub date: String,
pub stats: Vec<HistoryClanStats>,
}
// User defines what a user is
#[derive(Clone, Serialize)]
pub struct User {
pub rights: String,
pub userid: isize,
pub nickname: String,
}
// ClanPositionEvery shows the clan position in every stat type
#[derive(Clone, Serialize)]
pub struct ClanPositionEvery {
pub v10l: u32,
pub winratio: u32,
pub global_weighted: u32,
pub global: u32,
pub gm_elo: u32,
pub gm_elo6: u32,
pub gm_elo8: u32,
pub gm_elo10: u32,
pub fb_elo: u32,
pub fb_elo6: u32,
pub fb_elo8: u32,
pub fb_elo10: u32,
pub efficiency: u32,
pub daily_battles: u32,
pub battles: u32,
pub members: u32,
}
impl ClanPositionEvery {
pub fn empty() -> Self {
Self {
v10l: 0,
winratio: 0,
global_weighted: 0,
global: 0,
gm_elo: 0,
gm_elo6: 0,
gm_elo8: 0,
gm_elo10: 0,
fb_elo: 0,
fb_elo6: 0,
fb_elo8: 0,
fb_elo10: 0,
efficiency: 0,
daily_battles: 0,
battles: 0,
members: 0,
}
}
}
// ClanNameAndTag is a type that has the tag and name of a clan
#[derive(Clone, Serialize)]
pub struct ClanNameAndTag {
pub tag: String,
pub name: String,
}
|
pub daily_battles: Option<f64>,
pub efficiency: Option<f64>,
|
random_line_split
|
types.rs
|
#[derive(Clone, Serialize)]
pub struct ClanStats {
pub tag: String,
pub name: String,
pub color: String,
pub members: usize,
pub description: String,
pub motto: String,
pub id: String,
pub emblems: ClanStatsEmblems,
pub blocked: bool,
pub stats: HistoryClanStats,
}
impl ClanStats {
pub fn empty() -> Self {
Self {
tag: String::new(),
name: String::new(),
color: String::new(),
members: 0,
description: String::new(),
motto: String::new(),
id: String::new(),
emblems: ClanStatsEmblems::empty(),
blocked: true,
stats: HistoryClanStats::empty(),
}
}
}
#[derive(Clone, Serialize)]
pub struct ClanStatsEmblems {
pub x256_wowp: String,
pub x195_portal: String,
pub x64_portal: String,
pub x64_wot: String,
pub x32_portal: String,
pub x24_portal: String,
}
impl ClanStatsEmblems {
pub fn empty() -> Self {
Self {
x256_wowp: String::new(),
x195_portal: String::new(),
x64_portal: String::new(),
x64_wot: String::new(),
x32_portal: String::new(),
x24_portal: String::new(),
}
}
}
// HistoryClanStats is what is expected to come back per clan from the database when requesting old clan data
#[derive(Clone, Serialize)]
pub struct HistoryClanStats {
pub tag: String,
pub name: String,
pub id: String,
pub members: usize,
pub battles: Option<f64>,
pub daily_battles: Option<f64>,
pub efficiency: Option<f64>,
pub fb_elo10: Option<f64>,
pub fb_elo8: Option<f64>,
pub fb_elo6: Option<f64>,
pub fb_elo: Option<f64>,
pub gm_elo10: Option<f64>,
pub gm_elo8: Option<f64>,
pub gm_elo6: Option<f64>,
pub gm_elo: Option<f64>,
pub glob_rating: Option<f64>,
pub glob_rating_weighted: Option<f64>,
pub win_ratio: Option<f64>,
pub v10l: Option<f64>,
}
impl HistoryClanStats {
pub fn empty() -> Self {
Self {
tag: String::new(),
name: String::new(),
id: String::new(),
members: 0,
battles: None,
daily_battles: None,
efficiency: None,
fb_elo10: None,
fb_elo8: None,
fb_elo6: None,
fb_elo: None,
gm_elo10: None,
gm_elo8: None,
gm_elo6: None,
gm_elo: None,
glob_rating: None,
glob_rating_weighted: None,
win_ratio: None,
v10l: None,
}
}
}
// HistoryCollectionItem is the contents of one history collection item
#[derive(Clone, Serialize)]
pub struct HistoryCollectionItem {
pub date: String,
pub stats: Vec<HistoryClanStats>,
}
// User defines what a user is
#[derive(Clone, Serialize)]
pub struct User {
pub rights: String,
pub userid: isize,
pub nickname: String,
}
// ClanPositionEvery shows the clan position in every stat type
#[derive(Clone, Serialize)]
pub struct ClanPositionEvery {
pub v10l: u32,
pub winratio: u32,
pub global_weighted: u32,
pub global: u32,
pub gm_elo: u32,
pub gm_elo6: u32,
pub gm_elo8: u32,
pub gm_elo10: u32,
pub fb_elo: u32,
pub fb_elo6: u32,
pub fb_elo8: u32,
pub fb_elo10: u32,
pub efficiency: u32,
pub daily_battles: u32,
pub battles: u32,
pub members: u32,
}
impl ClanPositionEvery {
pub fn
|
() -> Self {
Self {
v10l: 0,
winratio: 0,
global_weighted: 0,
global: 0,
gm_elo: 0,
gm_elo6: 0,
gm_elo8: 0,
gm_elo10: 0,
fb_elo: 0,
fb_elo6: 0,
fb_elo8: 0,
fb_elo10: 0,
efficiency: 0,
daily_battles: 0,
battles: 0,
members: 0,
}
}
}
// ClanNameAndTag is a type that has the tag and name of a clan
#[derive(Clone, Serialize)]
pub struct ClanNameAndTag {
pub tag: String,
pub name: String,
}
|
empty
|
identifier_name
|
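The types.rs structs above all derive Serialize and expose empty() constructors with Option fields left as None. A short sketch of how they might be filled and serialized follows; serde_json is an assumption here (only the Serialize derive is visible in the source), and the function name is illustrative.
```rust
// Sketch only: assumes this lives alongside the structs above and that the
// crate depends on serde/serde_json; neither is shown in the source file.
fn clan_summary_json() -> String {
    let mut clan = ClanStats::empty();
    clan.tag = "ABC".to_string();
    clan.name = "Example Clan".to_string();
    clan.members = 42;
    clan.blocked = false;

    // Ratings live in the nested HistoryClanStats; unknown values stay None
    // and serialize as null.
    clan.stats.battles = Some(1234.0);
    clan.stats.win_ratio = Some(0.55);

    serde_json::to_string(&clan).expect("serialization should not fail")
}
```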
decodable.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The compiler code necessary for `#[derive(Decodable)]`. See encodable.rs for more.
use ast;
use ast::{MetaItem, Expr, MutMutable};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use parse::token;
use ptr::P;
pub fn expand_deriving_rustc_decodable(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "rustc_serialize")
}
pub fn expand_deriving_decodable(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize")
}
fn expand_deriving_decodable_imp(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable),
krate: &'static str)
{
    if !cx.use_std {
        // FIXME(#21880): lift this requirement.
        cx.span_err(span, "this trait cannot be derived with #![no_std]");
        return;
    }
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: Path::new_(vec!(krate, "Decodable"), None, vec!(), true),
additional_bounds: Vec::new(),
generics: LifetimeBounds::empty(),
methods: vec!(
MethodDef {
name: "decode",
generics: LifetimeBounds {
lifetimes: Vec::new(),
bounds: vec!(("__D", vec!(Path::new_(
vec!(krate, "Decoder"), None,
vec!(), true))))
},
explicit_self: None,
args: vec!(Ptr(Box::new(Literal(Path::new_local("__D"))),
Borrowed(None, MutMutable))),
ret_ty: Literal(Path::new_(
pathvec_std!(cx, core::result::Result),
None,
vec!(Box::new(Self_), Box::new(Literal(Path::new_(
vec!["__D", "Error"], None, vec![], false
)))),
true
)),
attributes: Vec::new(),
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
decodable_substructure(a, b, c, krate)
})),
}
),
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
substr: &Substructure,
krate: &str) -> P<Expr> {
let decoder = substr.nonself_args[0].clone();
let recurse = vec!(cx.ident_of(krate),
cx.ident_of("Decodable"),
cx.ident_of("decode"));
let exprdecode = cx.expr_path(cx.path_global(trait_span, recurse));
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(trait_span, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let path = cx.path_ident(trait_span, substr.type_ident);
let result = decode_static_fields(cx,
trait_span,
path,
summary,
|cx, span, name, field| {
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), read_struct_field,
vec!(cx.expr_str(span, name),
cx.expr_usize(span, field),
exprdecode.clone())))
});
let result = cx.expr_ok(trait_span, result);
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_struct"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.expr_usize(trait_span, nfields),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = Vec::new();
let mut variants = Vec::new();
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, &(name, v_span, ref parts)) in fields.iter().enumerate() {
variants.push(cx.expr_str(v_span, token::get_ident(name)));
let path = cx.path(trait_span, vec![substr.type_ident, name]);
let decoded = decode_static_fields(cx,
v_span,
path,
parts,
|cx, span, _, field| {
let idx = cx.expr_usize(span, field);
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), rvariant_arg,
vec!(idx, exprdecode.clone())))
});
arms.push(cx.arm(v_span,
vec!(cx.pat_lit(v_span, cx.expr_usize(v_span, i))),
decoded));
}
arms.push(cx.arm_unreachable(trait_span));
let result = cx.expr_ok(trait_span,
cx.expr_match(trait_span,
cx.expr_ident(trait_span, variant), arms));
let lambda = cx.lambda_expr(trait_span, vec!(blkarg, variant), result);
let variant_vec = cx.expr_vec(trait_span, variants);
let variant_vec = cx.expr_addr_of(trait_span, variant_vec);
let result = cx.expr_method_call(trait_span, blkdecoder,
cx.ident_of("read_enum_variant"),
vec!(variant_vec, lambda));
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_enum"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
_ => cx.bug("expected StaticEnum or StaticStruct in derive(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_path` is the path to this enum variant/struct
/// - `getarg` should retrieve the `usize`-th field with name `@str`.
fn decode_static_fields<F>(cx: &mut ExtCtxt,
trait_span: Span,
outer_pat_path: ast::Path,
fields: &StaticFields,
mut getarg: F)
-> P<Expr> where
F: FnMut(&mut ExtCtxt, Span, InternedString, usize) -> P<Expr>,
|
cx.field_imm(span, name, arg)
}).collect();
cx.expr_struct(trait_span, outer_pat_path, fields)
}
}
}
|
{
match *fields {
Unnamed(ref fields) => {
let path_expr = cx.expr_path(outer_pat_path);
if fields.is_empty() {
path_expr
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(cx, span,
token::intern_and_get_ident(&format!("_field{}", i)),
i)
}).collect();
cx.expr_call(trait_span, path_expr, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
let arg = getarg(cx, span, token::get_ident(name), i);
|
identifier_body
|
decodable.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The compiler code necessary for `#[derive(Decodable)]`. See encodable.rs for more.
use ast;
use ast::{MetaItem, Expr, MutMutable};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use parse::token;
use ptr::P;
pub fn expand_deriving_rustc_decodable(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "rustc_serialize")
}
pub fn expand_deriving_decodable(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize")
}
fn expand_deriving_decodable_imp(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable),
krate: &'static str)
{
    if !cx.use_std {
        // FIXME(#21880): lift this requirement.
        cx.span_err(span, "this trait cannot be derived with #![no_std]");
        return;
    }
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: Path::new_(vec!(krate, "Decodable"), None, vec!(), true),
additional_bounds: Vec::new(),
generics: LifetimeBounds::empty(),
methods: vec!(
MethodDef {
name: "decode",
generics: LifetimeBounds {
lifetimes: Vec::new(),
bounds: vec!(("__D", vec!(Path::new_(
vec!(krate, "Decoder"), None,
vec!(), true))))
},
explicit_self: None,
args: vec!(Ptr(Box::new(Literal(Path::new_local("__D"))),
Borrowed(None, MutMutable))),
ret_ty: Literal(Path::new_(
pathvec_std!(cx, core::result::Result),
None,
vec!(Box::new(Self_), Box::new(Literal(Path::new_(
vec!["__D", "Error"], None, vec![], false
)))),
true
)),
attributes: Vec::new(),
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
decodable_substructure(a, b, c, krate)
})),
}
),
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
substr: &Substructure,
krate: &str) -> P<Expr> {
let decoder = substr.nonself_args[0].clone();
let recurse = vec!(cx.ident_of(krate),
cx.ident_of("Decodable"),
cx.ident_of("decode"));
let exprdecode = cx.expr_path(cx.path_global(trait_span, recurse));
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(trait_span, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
|
let path = cx.path_ident(trait_span, substr.type_ident);
let result = decode_static_fields(cx,
trait_span,
path,
summary,
|cx, span, name, field| {
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), read_struct_field,
vec!(cx.expr_str(span, name),
cx.expr_usize(span, field),
exprdecode.clone())))
});
let result = cx.expr_ok(trait_span, result);
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_struct"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.expr_usize(trait_span, nfields),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = Vec::new();
let mut variants = Vec::new();
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, &(name, v_span, ref parts)) in fields.iter().enumerate() {
variants.push(cx.expr_str(v_span, token::get_ident(name)));
let path = cx.path(trait_span, vec![substr.type_ident, name]);
let decoded = decode_static_fields(cx,
v_span,
path,
parts,
|cx, span, _, field| {
let idx = cx.expr_usize(span, field);
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), rvariant_arg,
vec!(idx, exprdecode.clone())))
});
arms.push(cx.arm(v_span,
vec!(cx.pat_lit(v_span, cx.expr_usize(v_span, i))),
decoded));
}
arms.push(cx.arm_unreachable(trait_span));
let result = cx.expr_ok(trait_span,
cx.expr_match(trait_span,
cx.expr_ident(trait_span, variant), arms));
let lambda = cx.lambda_expr(trait_span, vec!(blkarg, variant), result);
let variant_vec = cx.expr_vec(trait_span, variants);
let variant_vec = cx.expr_addr_of(trait_span, variant_vec);
let result = cx.expr_method_call(trait_span, blkdecoder,
cx.ident_of("read_enum_variant"),
vec!(variant_vec, lambda));
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_enum"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
_ => cx.bug("expected StaticEnum or StaticStruct in derive(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_path` is the path to this enum variant/struct
/// - `getarg` should retrieve the `usize`-th field with name `@str`.
fn decode_static_fields<F>(cx: &mut ExtCtxt,
trait_span: Span,
outer_pat_path: ast::Path,
fields: &StaticFields,
mut getarg: F)
-> P<Expr> where
F: FnMut(&mut ExtCtxt, Span, InternedString, usize) -> P<Expr>,
{
match *fields {
Unnamed(ref fields) => {
let path_expr = cx.expr_path(outer_pat_path);
if fields.is_empty() {
path_expr
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(cx, span,
token::intern_and_get_ident(&format!("_field{}", i)),
i)
}).collect();
cx.expr_call(trait_span, path_expr, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
let arg = getarg(cx, span, token::get_ident(name), i);
cx.field_imm(span, name, arg)
}).collect();
cx.expr_struct(trait_span, outer_pat_path, fields)
}
}
}
|
random_line_split
|
|
decodable.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The compiler code necessary for `#[derive(Decodable)]`. See encodable.rs for more.
use ast;
use ast::{MetaItem, Expr, MutMutable};
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use parse::token;
use ptr::P;
pub fn
|
(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "rustc_serialize")
}
pub fn expand_deriving_decodable(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable))
{
expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize")
}
fn expand_deriving_decodable_imp(cx: &mut ExtCtxt,
span: Span,
mitem: &MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable),
krate: &'static str)
{
    if !cx.use_std {
        // FIXME(#21880): lift this requirement.
        cx.span_err(span, "this trait cannot be derived with #![no_std]");
        return;
    }
let trait_def = TraitDef {
span: span,
attributes: Vec::new(),
path: Path::new_(vec!(krate, "Decodable"), None, vec!(), true),
additional_bounds: Vec::new(),
generics: LifetimeBounds::empty(),
methods: vec!(
MethodDef {
name: "decode",
generics: LifetimeBounds {
lifetimes: Vec::new(),
bounds: vec!(("__D", vec!(Path::new_(
vec!(krate, "Decoder"), None,
vec!(), true))))
},
explicit_self: None,
args: vec!(Ptr(Box::new(Literal(Path::new_local("__D"))),
Borrowed(None, MutMutable))),
ret_ty: Literal(Path::new_(
pathvec_std!(cx, core::result::Result),
None,
vec!(Box::new(Self_), Box::new(Literal(Path::new_(
vec!["__D", "Error"], None, vec![], false
)))),
true
)),
attributes: Vec::new(),
is_unsafe: false,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
decodable_substructure(a, b, c, krate)
})),
}
),
associated_types: Vec::new(),
};
trait_def.expand(cx, mitem, item, push)
}
fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span,
substr: &Substructure,
krate: &str) -> P<Expr> {
let decoder = substr.nonself_args[0].clone();
let recurse = vec!(cx.ident_of(krate),
cx.ident_of("Decodable"),
cx.ident_of("decode"));
let exprdecode = cx.expr_path(cx.path_global(trait_span, recurse));
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(trait_span, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let path = cx.path_ident(trait_span, substr.type_ident);
let result = decode_static_fields(cx,
trait_span,
path,
summary,
|cx, span, name, field| {
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), read_struct_field,
vec!(cx.expr_str(span, name),
cx.expr_usize(span, field),
exprdecode.clone())))
});
let result = cx.expr_ok(trait_span, result);
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_struct"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.expr_usize(trait_span, nfields),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = Vec::new();
let mut variants = Vec::new();
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, &(name, v_span, ref parts)) in fields.iter().enumerate() {
variants.push(cx.expr_str(v_span, token::get_ident(name)));
let path = cx.path(trait_span, vec![substr.type_ident, name]);
let decoded = decode_static_fields(cx,
v_span,
path,
parts,
|cx, span, _, field| {
let idx = cx.expr_usize(span, field);
cx.expr_try(span,
cx.expr_method_call(span, blkdecoder.clone(), rvariant_arg,
vec!(idx, exprdecode.clone())))
});
arms.push(cx.arm(v_span,
vec!(cx.pat_lit(v_span, cx.expr_usize(v_span, i))),
decoded));
}
arms.push(cx.arm_unreachable(trait_span));
let result = cx.expr_ok(trait_span,
cx.expr_match(trait_span,
cx.expr_ident(trait_span, variant), arms));
let lambda = cx.lambda_expr(trait_span, vec!(blkarg, variant), result);
let variant_vec = cx.expr_vec(trait_span, variants);
let variant_vec = cx.expr_addr_of(trait_span, variant_vec);
let result = cx.expr_method_call(trait_span, blkdecoder,
cx.ident_of("read_enum_variant"),
vec!(variant_vec, lambda));
cx.expr_method_call(trait_span,
decoder,
cx.ident_of("read_enum"),
vec!(
cx.expr_str(trait_span, token::get_ident(substr.type_ident)),
cx.lambda_expr_1(trait_span, result, blkarg)
))
}
_ => cx.bug("expected StaticEnum or StaticStruct in derive(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_path` is the path to this enum variant/struct
/// - `getarg` should retrieve the `usize`-th field with name `@str`.
fn decode_static_fields<F>(cx: &mut ExtCtxt,
trait_span: Span,
outer_pat_path: ast::Path,
fields: &StaticFields,
mut getarg: F)
-> P<Expr> where
F: FnMut(&mut ExtCtxt, Span, InternedString, usize) -> P<Expr>,
{
match *fields {
Unnamed(ref fields) => {
let path_expr = cx.expr_path(outer_pat_path);
if fields.is_empty() {
path_expr
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(cx, span,
token::intern_and_get_ident(&format!("_field{}", i)),
i)
}).collect();
cx.expr_call(trait_span, path_expr, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
let arg = getarg(cx, span, token::get_ident(name), i);
cx.field_imm(span, name, arg)
}).collect();
cx.expr_struct(trait_span, outer_pat_path, fields)
}
}
}
|
expand_deriving_rustc_decodable
|
identifier_name
|
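decodable.rs above is the compiler-side machinery behind `#[derive(Decodable)]`: for a plain struct, the StaticStruct arm wraps everything in a `read_struct` call whose closure decodes each field with `read_struct_field`. The sketch below shows the approximate shape of that expansion for a hypothetical two-field struct, hand-written against the old rustc-serialize Decoder API; `Point` and its fields are illustrative names, not part of the source.
```rust
// Approximate hand-written equivalent of what the StaticStruct arm generates.
extern crate rustc_serialize;
use rustc_serialize::{Decodable, Decoder};

struct Point { x: i32, y: i32 }

impl Decodable for Point {
    fn decode<D: Decoder>(d: &mut D) -> Result<Point, D::Error> {
        // read_struct(name, field count, closure) mirrors the expr_method_call
        // built in decodable_substructure; each field goes through
        // read_struct_field(name, index, Decodable::decode).
        d.read_struct("Point", 2, |d| {
            Ok(Point {
                x: try!(d.read_struct_field("x", 0, Decodable::decode)),
                y: try!(d.read_struct_field("y", 1, Decodable::decode)),
            })
        })
    }
}
```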
const-err-multi.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(const_err)]
pub const A: i8 = -std::i8::MIN;
//~^ ERROR const_err
pub const B: i8 = A;
//~^ ERROR const_err
|
fn main() {
let _ = (A, B, C, D);
}
|
pub const C: u8 = A as u8;
//~^ ERROR const_err
pub const D: i8 = 50 - A;
//~^ ERROR const_err
|
random_line_split
|
const-err-multi.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(const_err)]
pub const A: i8 = -std::i8::MIN;
//~^ ERROR const_err
pub const B: i8 = A;
//~^ ERROR const_err
pub const C: u8 = A as u8;
//~^ ERROR const_err
pub const D: i8 = 50 - A;
//~^ ERROR const_err
fn main()
|
{
let _ = (A, B, C, D);
}
|
identifier_body
|
|
const-err-multi.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(const_err)]
pub const A: i8 = -std::i8::MIN;
//~^ ERROR const_err
pub const B: i8 = A;
//~^ ERROR const_err
pub const C: u8 = A as u8;
//~^ ERROR const_err
pub const D: i8 = 50 - A;
//~^ ERROR const_err
fn
|
() {
let _ = (A, B, C, D);
}
|
main
|
identifier_name
|
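const-err-multi.rs is a compile-fail test for the const_err lint: `std::i8::MIN` is -128, so `-std::i8::MIN` would be +128, which does not fit in i8, and B, C, and D then error as well because they are all computed from the already-erroneous A. A small standalone illustration of the same overflow using checked arithmetic; it is not part of the test file itself.
```rust
fn main() {
    // i8 ranges from -128 to 127, so negating i8::MIN cannot be represented;
    // this is the overflow the lint reports for constant A above.
    let min: i8 = i8::MIN; // -128
    assert_eq!(min.checked_neg(), None);
}
```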