file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
leafmt.rs
|
//! Lea code formatter.
// FIXME This requires the parser to preserve comments to work properly
// This is blocked on an upstream issue: https://github.com/kevinmehall/rust-peg/issues/84
#[macro_use]
extern crate clap;
extern crate term;
extern crate rustc_serialize;
extern crate lea_parser as parser;
extern crate lea;
use parser::span::DummyTerm;
use parser::prettyprint::PrettyPrinter;
use std::io::{self, stdin, stderr, stdout, Read, Write};
use std::fs::File;
use std::path::Path;
/// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a
/// `DummyTerm` that writes to stderr instead.
fn stderr_term() -> Box<term::StderrTerminal> {
term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr())))
}
/// Parses the given source code and pretty-prints it
fn prettyprint<W: Write>(code: &str, source_name: &str, mut target: W) -> io::Result<()> {
match parser::block(code) {
Ok(main) => {
let mut pp = PrettyPrinter::new(&mut target);
try!(pp.print_block(&main));
}
Err(e) => {
try!(e.format(code, source_name, &mut *stderr_term()));
}
}
Ok(())
}
fn read_file(filename: &str) -> io::Result<String> {
let mut s = String::new();
let mut file = try!(File::open(&Path::new(filename)));
try!(file.read_to_string(&mut s));
Ok(s)
}
fn main()
|
Ok(content) => content,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
}
}
// Open output
let writer: Box<Write>;
match matches.value_of("out") {
None | Some("-") => {
writer = Box::new(stdout()) as Box<Write>;
}
Some(s) => {
let f = match File::create(&Path::new(s)) {
Ok(f) => f,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
};
writer = Box::new(f) as Box<Write>;
}
}
prettyprint(&code, &source_name, writer).unwrap();
}
|
{
let matches = clap_app!(leafmt =>
(version: lea::version_str())
(about: "Lea source code formatter / pretty printer")
(@arg FILE: +required "The file to format (`-` to read from stdin)")
(@arg out: -o --out +takes_value "Write output to <out> (`-` to write to stdout).")
(after_help: "By default, leafmt will write the formatted code to stdout.\n")
).get_matches();
let file = matches.value_of("FILE").unwrap();
// Read input
let mut code = String::new();
let source_name;
if file == "-" {
stdin().read_to_string(&mut code).unwrap();
source_name = "<stdin>";
} else {
source_name = file;
code = match read_file(&source_name) {
|
identifier_body
|
leafmt.rs
|
//! Lea code formatter.
// FIXME This requires the parser to preserve comments to work properly
// This is blocked on an upstream issue: https://github.com/kevinmehall/rust-peg/issues/84
#[macro_use]
extern crate clap;
extern crate term;
extern crate rustc_serialize;
extern crate lea_parser as parser;
extern crate lea;
use parser::span::DummyTerm;
use parser::prettyprint::PrettyPrinter;
use std::io::{self, stdin, stderr, stdout, Read, Write};
use std::fs::File;
use std::path::Path;
/// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a
/// `DummyTerm` that writes to stderr instead.
fn stderr_term() -> Box<term::StderrTerminal> {
term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr())))
}
/// Parses the given source code and pretty-prints it
fn prettyprint<W: Write>(code: &str, source_name: &str, mut target: W) -> io::Result<()> {
match parser::block(code) {
Ok(main) => {
let mut pp = PrettyPrinter::new(&mut target);
try!(pp.print_block(&main));
}
Err(e) => {
try!(e.format(code, source_name, &mut *stderr_term()));
}
}
Ok(())
}
fn read_file(filename: &str) -> io::Result<String> {
let mut s = String::new();
let mut file = try!(File::open(&Path::new(filename)));
try!(file.read_to_string(&mut s));
Ok(s)
}
fn main() {
let matches = clap_app!(leafmt =>
(version: lea::version_str())
(about: "Lea source code formatter / pretty printer")
(@arg FILE: +required "The file to format (`-` to read from stdin)")
(@arg out: -o --out +takes_value "Write output to <out> (`-` to write to stdout).")
(after_help: "By default, leafmt will write the formatted code to stdout.\n")
).get_matches();
let file = matches.value_of("FILE").unwrap();
// Read input
let mut code = String::new();
let source_name;
if file == "-"
|
else {
source_name = file;
code = match read_file(&source_name) {
Ok(content) => content,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
}
}
// Open output
let writer: Box<Write>;
match matches.value_of("out") {
None | Some("-") => {
writer = Box::new(stdout()) as Box<Write>;
}
Some(s) => {
let f = match File::create(&Path::new(s)) {
Ok(f) => f,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
};
writer = Box::new(f) as Box<Write>;
}
}
prettyprint(&code, &source_name, writer).unwrap();
}
|
{
stdin().read_to_string(&mut code).unwrap();
source_name = "<stdin>";
}
|
conditional_block
|
leafmt.rs
|
//! Lea code formatter.
// FIXME This requires the parser to preserve comments to work properly
// This is blocked on an upstream issue: https://github.com/kevinmehall/rust-peg/issues/84
#[macro_use]
extern crate clap;
extern crate term;
extern crate rustc_serialize;
extern crate lea_parser as parser;
extern crate lea;
use parser::span::DummyTerm;
use parser::prettyprint::PrettyPrinter;
use std::io::{self, stdin, stderr, stdout, Read, Write};
use std::fs::File;
use std::path::Path;
/// Opens a terminal that writes to stderr. If stderr couldn't be opened as a terminal, creates a
/// `DummyTerm` that writes to stderr instead.
fn stderr_term() -> Box<term::StderrTerminal> {
term::stderr().unwrap_or_else(|| Box::new(DummyTerm(io::stderr())))
}
/// Parses the given source code and pretty-prints it
|
fn prettyprint<W: Write>(code: &str, source_name: &str, mut target: W) -> io::Result<()> {
match parser::block(code) {
Ok(main) => {
let mut pp = PrettyPrinter::new(&mut target);
try!(pp.print_block(&main));
}
Err(e) => {
try!(e.format(code, source_name, &mut *stderr_term()));
}
}
Ok(())
}
fn read_file(filename: &str) -> io::Result<String> {
let mut s = String::new();
let mut file = try!(File::open(&Path::new(filename)));
try!(file.read_to_string(&mut s));
Ok(s)
}
fn main() {
let matches = clap_app!(leafmt =>
(version: lea::version_str())
(about: "Lea source code formatter / pretty printer")
(@arg FILE: +required "The file to format (`-` to read from stdin)")
(@arg out: -o --out +takes_value "Write output to <out> (`-` to write to stdout).")
(after_help: "By default, leafmt will write the formatted code to stdout.\n")
).get_matches();
let file = matches.value_of("FILE").unwrap();
// Read input
let mut code = String::new();
let source_name;
if file == "-" {
stdin().read_to_string(&mut code).unwrap();
source_name = "<stdin>";
} else {
source_name = file;
code = match read_file(&source_name) {
Ok(content) => content,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
}
}
// Open output
let writer: Box<Write>;
match matches.value_of("out") {
None | Some("-") => {
writer = Box::new(stdout()) as Box<Write>;
}
Some(s) => {
let f = match File::create(&Path::new(s)) {
Ok(f) => f,
Err(e) => {
writeln!(stderr(), "{}", e).unwrap();
return;
}
};
writer = Box::new(f) as Box<Write>;
}
}
prettyprint(&code, &source_name, writer).unwrap();
}
|
random_line_split
|
|
mod.rs
|
// library of matrix and vector functions
// a 3 component vector
pub struct Vec3{
pub x: f32,
pub y: f32,
pub z: f32
}
// associated functions of Vec3
impl Vec3{
// used for constructing Vec3 nicely
pub fn new(x:f32,y:f32,z:f32) -> Vec3 {
Vec3{x: x, y: y, z: z}
}
}
// a 3x3 matrix
pub struct Mat3{
pub values: [[f32;3];3]
}
impl Mat3{
// used for constructing an empty matrix
pub fn new_empty() -> Mat3{
Mat3{values:
[[0.0,0.0,0.0],
[0.0,0.0,0.0],
[0.0,0.0,0.0]]
}
}
}
pub fn dot(vec1:&Vec3,vec2:&Vec3) -> f32{
(vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z)
}
// matrix multiplication of a 3x3 matrix with a 3 dimensional vector (1x3 matrix)
// returns the result as a vec3
#[allow(non_snake_case)]
pub fn MatXVec3(mat: &Mat3,vec: &Vec3) -> Vec3{
let mut result = Vec3::new(0.0,0.0,0.0);
// construct vectors from the matrix
let mval = mat.values;
let mvec1 = Vec3::new(mval[0][0],mval[0][1],mval[0][2]);
let mvec2 = Vec3::new(mval[1][0],mval[1][1],mval[1][2]);
let mvec3 = Vec3::new(mval[2][0],mval[2][1],mval[2][2]);
// matrix multiplication is just a bunch of dot products
result.x = dot(&mvec1,vec);
result.y = dot(&mvec2,vec);
result.z = dot(&mvec3,vec);
// return resulting vector
result
}
// Matrix multiplication of a 3x3 matrix with a 3x3 matrix
// returns the result as a Mat3
#[allow(non_snake_case)]
pub fn
|
(mat1: &Mat3,mat2: &Mat3) -> Mat3{
let mut result = Mat3::new_empty();
// go by row
for i in 0..3{
// go by column
for j in 0..3{
let mut sum :f32 = 0.0;
// get the result of the row,column pair (i,j)
for k in 0..3{
sum = sum + (mat1.values[i][k] * mat2.values[k][j]);
}
result.values[j][i] = sum;
}
}
result
}
|
MatXMat3
|
identifier_name
|
mod.rs
|
// library of matrix and vector functions
// a 3 component vector
pub struct Vec3{
pub x: f32,
pub y: f32,
pub z: f32
}
// associated functions of Vec3
|
}
}
// a 3x3 matrix
pub struct Mat3{
pub values: [[f32;3];3]
}
impl Mat3{
// used for constructing an empty matrix
pub fn new_empty() -> Mat3{
Mat3{values:
[[0.0,0.0,0.0],
[0.0,0.0,0.0],
[0.0,0.0,0.0]]
}
}
}
pub fn dot(vec1:&Vec3,vec2:&Vec3) -> f32{
(vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z)
}
// matrix multiplication of a 3x3 matrix with a 3 dimensional vector (1x3 matrix)
// returns the result as a vec3
#[allow(non_snake_case)]
pub fn MatXVec3(mat: &Mat3,vec: &Vec3) -> Vec3{
let mut result = Vec3::new(0.0,0.0,0.0);
// construct vectors from the matrix
let mval = mat.values;
let mvec1 = Vec3::new(mval[0][0],mval[0][1],mval[0][2]);
let mvec2 = Vec3::new(mval[1][0],mval[1][1],mval[1][2]);
let mvec3 = Vec3::new(mval[2][0],mval[2][1],mval[2][2]);
// matrix multiplication is just a bunch of dot products
result.x = dot(&mvec1,vec);
result.y = dot(&mvec2,vec);
result.z = dot(&mvec3,vec);
// return resulting vector
result
}
// Matrix multiplication of a 3x3 matrix with a 3x3 matrix
// returns the result as a Mat3
#[allow(non_snake_case)]
pub fn MatXMat3 (mat1: &Mat3,mat2: &Mat3) -> Mat3{
let mut result = Mat3::new_empty();
// go by row
for i in 0..3{
// go by column
for j in 0..3{
let mut sum :f32 = 0.0;
// get the result of the row,column pair (i,j)
for k in 0..3{
sum = sum + (mat1.values[i][k] * mat2.values[k][j]);
}
result.values[j][i] = sum;
}
}
result
}
|
impl Vec3{
// used for constructing Vec3 nicely
pub fn new(x:f32,y:f32,z:f32) -> Vec3 {
Vec3{x: x, y: y, z: z}
|
random_line_split
|
quickcheck_types.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! QuickCheck support for wire packs.
use quickcheck::{empty_shrinker, Arbitrary, Gen};
use mercurial_types::{Delta, HgNodeHash, MPath, RepoPath, NULL_HASH};
use revisionstore_types::Metadata;
use super::{DataEntry, HistoryEntry, Kind};
#[derive(Clone, Debug)]
pub struct WirePackPartSequence {
pub kind: Kind,
pub files: Vec<FileEntries>,
}
impl Arbitrary for WirePackPartSequence {
fn arbitrary(g: &mut Gen) -> Self {
let size = g.size();
let kind = if bool::arbitrary(g) {
Kind::Tree
} else {
Kind::File
};
let file_count = usize::arbitrary(g) % size;
let files = (0..file_count)
.map(|_| FileEntries::arbitrary_params(g, kind))
.collect();
Self { kind, files }
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let kind = self.kind;
Box::new(self.files.shrink().map(move |files| Self { kind, files }))
}
}
#[derive(Clone, Debug)]
pub struct FileEntries {
pub filename: RepoPath,
pub history: Vec<HistoryEntry>,
pub data: Vec<DataEntry>,
}
impl FileEntries {
fn arbitrary_params(g: &mut Gen, kind: Kind) -> Self {
let size = g.size();
let history_len = usize::arbitrary(g) % size;
let data_len = usize::arbitrary(g) % size;
let filename = match kind {
Kind::Tree => {
// 10% chance for it to be the root
if u64::arbitrary(g) % 10 == 0 {
RepoPath::root()
} else {
RepoPath::DirectoryPath(MPath::arbitrary(g))
}
}
Kind::File => RepoPath::FilePath(MPath::arbitrary(g)),
};
let history = (0..history_len)
.map(|_| HistoryEntry::arbitrary_kind(g, kind))
.collect();
let data = (0..data_len).map(|_| DataEntry::arbitrary(g)).collect();
Self {
filename,
history,
data,
}
}
}
impl Arbitrary for FileEntries {
fn arbitrary(_g: &mut Gen) -> Self {
// FileEntries depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let filename = self.filename.clone();
let self_history = self.history.clone();
let self_data = self.data.clone();
Box::new(
(self_history, self_data)
.shrink()
.map(move |(history, data)| Self {
filename: filename.clone(),
history,
data,
}),
)
}
}
impl HistoryEntry {
pub fn arbitrary_kind(g: &mut Gen, kind: Kind) -> Self {
let copy_from = match kind {
Kind::File => {
// 20% chance of generating copy-from info
if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
Some(RepoPath::FilePath(MPath::arbitrary(g)))
} else {
None
}
}
Kind::Tree => None,
};
Self {
node: HgNodeHash::arbitrary(g),
p1: HgNodeHash::arbitrary(g),
p2: HgNodeHash::arbitrary(g),
linknode: HgNodeHash::arbitrary(g),
copy_from,
}
}
}
impl Arbitrary for HistoryEntry {
fn arbitrary(_g: &mut Gen) -> Self {
// HistoryEntry depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
// Not going to get anything out of shrinking this since MPath is not shrinkable.
}
impl Arbitrary for DataEntry {
fn arbitrary(g: &mut Gen) -> Self {
// 20% chance of a fulltext revision
let (delta_base, delta) = if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
(NULL_HASH, Delta::new_fulltext(Vec::arbitrary(g)))
} else {
let mut delta_base = NULL_HASH;
while delta_base == NULL_HASH {
delta_base = HgNodeHash::arbitrary(g);
}
(delta_base, Delta::arbitrary(g))
};
// 50% chance of having metadata (i.e. being v2)
let metadata = if bool::arbitrary(g) {
// 50% chance of flags being present
let flags = if bool::arbitrary(g)
|
else { None };
// 50% chance of size being present
let size = if bool::arbitrary(g) { Some(2) } else { None };
Some(Metadata { flags, size })
} else {
None
};
Self {
node: HgNodeHash::arbitrary(g),
delta_base,
delta,
metadata,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
// The delta is the only shrinkable here. However, we cannot shrink it if we don't have
// base (this might generate a non-fulltext delta).
if self.delta_base == NULL_HASH {
empty_shrinker()
} else {
let node = self.node;
let delta_base = self.delta_base;
let metadata = self.metadata;
Box::new(self.delta.shrink().map(move |delta| Self {
node,
delta_base,
delta,
metadata,
}))
}
}
}
|
{ Some(1) }
|
conditional_block
|
quickcheck_types.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! QuickCheck support for wire packs.
use quickcheck::{empty_shrinker, Arbitrary, Gen};
use mercurial_types::{Delta, HgNodeHash, MPath, RepoPath, NULL_HASH};
use revisionstore_types::Metadata;
use super::{DataEntry, HistoryEntry, Kind};
#[derive(Clone, Debug)]
pub struct WirePackPartSequence {
pub kind: Kind,
pub files: Vec<FileEntries>,
}
impl Arbitrary for WirePackPartSequence {
fn arbitrary(g: &mut Gen) -> Self {
let size = g.size();
let kind = if bool::arbitrary(g) {
Kind::Tree
} else {
Kind::File
};
let file_count = usize::arbitrary(g) % size;
let files = (0..file_count)
.map(|_| FileEntries::arbitrary_params(g, kind))
.collect();
Self { kind, files }
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let kind = self.kind;
Box::new(self.files.shrink().map(move |files| Self { kind, files }))
}
}
#[derive(Clone, Debug)]
pub struct FileEntries {
pub filename: RepoPath,
pub history: Vec<HistoryEntry>,
pub data: Vec<DataEntry>,
}
impl FileEntries {
fn arbitrary_params(g: &mut Gen, kind: Kind) -> Self {
let size = g.size();
let history_len = usize::arbitrary(g) % size;
let data_len = usize::arbitrary(g) % size;
let filename = match kind {
Kind::Tree => {
// 10% chance for it to be the root
if u64::arbitrary(g) % 10 == 0 {
RepoPath::root()
} else {
RepoPath::DirectoryPath(MPath::arbitrary(g))
}
}
Kind::File => RepoPath::FilePath(MPath::arbitrary(g)),
};
let history = (0..history_len)
.map(|_| HistoryEntry::arbitrary_kind(g, kind))
.collect();
let data = (0..data_len).map(|_| DataEntry::arbitrary(g)).collect();
Self {
filename,
history,
data,
}
}
}
impl Arbitrary for FileEntries {
fn arbitrary(_g: &mut Gen) -> Self {
// FileEntries depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let filename = self.filename.clone();
let self_history = self.history.clone();
let self_data = self.data.clone();
Box::new(
(self_history, self_data)
.shrink()
.map(move |(history, data)| Self {
filename: filename.clone(),
history,
data,
}),
)
}
}
impl HistoryEntry {
pub fn arbitrary_kind(g: &mut Gen, kind: Kind) -> Self {
let copy_from = match kind {
Kind::File => {
// 20% chance of generating copy-from info
if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
Some(RepoPath::FilePath(MPath::arbitrary(g)))
} else {
None
}
}
Kind::Tree => None,
};
Self {
node: HgNodeHash::arbitrary(g),
p1: HgNodeHash::arbitrary(g),
p2: HgNodeHash::arbitrary(g),
linknode: HgNodeHash::arbitrary(g),
copy_from,
}
}
}
impl Arbitrary for HistoryEntry {
fn arbitrary(_g: &mut Gen) -> Self {
|
// Not going to get anything out of shrinking this since MPath is not shrinkable.
}
impl Arbitrary for DataEntry {
fn arbitrary(g: &mut Gen) -> Self {
// 20% chance of a fulltext revision
let (delta_base, delta) = if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
(NULL_HASH, Delta::new_fulltext(Vec::arbitrary(g)))
} else {
let mut delta_base = NULL_HASH;
while delta_base == NULL_HASH {
delta_base = HgNodeHash::arbitrary(g);
}
(delta_base, Delta::arbitrary(g))
};
// 50% chance of having metadata (i.e. being v2)
let metadata = if bool::arbitrary(g) {
// 50% chance of flags being present
let flags = if bool::arbitrary(g) { Some(1) } else { None };
// 50% chance of size being present
let size = if bool::arbitrary(g) { Some(2) } else { None };
Some(Metadata { flags, size })
} else {
None
};
Self {
node: HgNodeHash::arbitrary(g),
delta_base,
delta,
metadata,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
// The delta is the only shrinkable here. However, we cannot shrink it if we don't have
// base (this might generate a non-fulltext delta).
if self.delta_base == NULL_HASH {
empty_shrinker()
} else {
let node = self.node;
let delta_base = self.delta_base;
let metadata = self.metadata;
Box::new(self.delta.shrink().map(move |delta| Self {
node,
delta_base,
delta,
metadata,
}))
}
}
}
|
// HistoryEntry depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
|
random_line_split
|
quickcheck_types.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! QuickCheck support for wire packs.
use quickcheck::{empty_shrinker, Arbitrary, Gen};
use mercurial_types::{Delta, HgNodeHash, MPath, RepoPath, NULL_HASH};
use revisionstore_types::Metadata;
use super::{DataEntry, HistoryEntry, Kind};
#[derive(Clone, Debug)]
pub struct WirePackPartSequence {
pub kind: Kind,
pub files: Vec<FileEntries>,
}
impl Arbitrary for WirePackPartSequence {
fn
|
(g: &mut Gen) -> Self {
let size = g.size();
let kind = if bool::arbitrary(g) {
Kind::Tree
} else {
Kind::File
};
let file_count = usize::arbitrary(g) % size;
let files = (0..file_count)
.map(|_| FileEntries::arbitrary_params(g, kind))
.collect();
Self { kind, files }
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let kind = self.kind;
Box::new(self.files.shrink().map(move |files| Self { kind, files }))
}
}
#[derive(Clone, Debug)]
pub struct FileEntries {
pub filename: RepoPath,
pub history: Vec<HistoryEntry>,
pub data: Vec<DataEntry>,
}
impl FileEntries {
fn arbitrary_params(g: &mut Gen, kind: Kind) -> Self {
let size = g.size();
let history_len = usize::arbitrary(g) % size;
let data_len = usize::arbitrary(g) % size;
let filename = match kind {
Kind::Tree => {
// 10% chance for it to be the root
if u64::arbitrary(g) % 10 == 0 {
RepoPath::root()
} else {
RepoPath::DirectoryPath(MPath::arbitrary(g))
}
}
Kind::File => RepoPath::FilePath(MPath::arbitrary(g)),
};
let history = (0..history_len)
.map(|_| HistoryEntry::arbitrary_kind(g, kind))
.collect();
let data = (0..data_len).map(|_| DataEntry::arbitrary(g)).collect();
Self {
filename,
history,
data,
}
}
}
impl Arbitrary for FileEntries {
fn arbitrary(_g: &mut Gen) -> Self {
// FileEntries depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let filename = self.filename.clone();
let self_history = self.history.clone();
let self_data = self.data.clone();
Box::new(
(self_history, self_data)
.shrink()
.map(move |(history, data)| Self {
filename: filename.clone(),
history,
data,
}),
)
}
}
impl HistoryEntry {
pub fn arbitrary_kind(g: &mut Gen, kind: Kind) -> Self {
let copy_from = match kind {
Kind::File => {
// 20% chance of generating copy-from info
if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
Some(RepoPath::FilePath(MPath::arbitrary(g)))
} else {
None
}
}
Kind::Tree => None,
};
Self {
node: HgNodeHash::arbitrary(g),
p1: HgNodeHash::arbitrary(g),
p2: HgNodeHash::arbitrary(g),
linknode: HgNodeHash::arbitrary(g),
copy_from,
}
}
}
impl Arbitrary for HistoryEntry {
fn arbitrary(_g: &mut Gen) -> Self {
// HistoryEntry depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
// Not going to get anything out of shrinking this since MPath is not shrinkable.
}
impl Arbitrary for DataEntry {
fn arbitrary(g: &mut Gen) -> Self {
// 20% chance of a fulltext revision
let (delta_base, delta) = if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
(NULL_HASH, Delta::new_fulltext(Vec::arbitrary(g)))
} else {
let mut delta_base = NULL_HASH;
while delta_base == NULL_HASH {
delta_base = HgNodeHash::arbitrary(g);
}
(delta_base, Delta::arbitrary(g))
};
// 50% chance of having metadata (i.e. being v2)
let metadata = if bool::arbitrary(g) {
// 50% chance of flags being present
let flags = if bool::arbitrary(g) { Some(1) } else { None };
// 50% chance of size being present
let size = if bool::arbitrary(g) { Some(2) } else { None };
Some(Metadata { flags, size })
} else {
None
};
Self {
node: HgNodeHash::arbitrary(g),
delta_base,
delta,
metadata,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
// The delta is the only shrinkable here. However, we cannot shrink it if we don't have
// base (this might generate a non-fulltext delta).
if self.delta_base == NULL_HASH {
empty_shrinker()
} else {
let node = self.node;
let delta_base = self.delta_base;
let metadata = self.metadata;
Box::new(self.delta.shrink().map(move |delta| Self {
node,
delta_base,
delta,
metadata,
}))
}
}
}
|
arbitrary
|
identifier_name
|
quickcheck_types.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! QuickCheck support for wire packs.
use quickcheck::{empty_shrinker, Arbitrary, Gen};
use mercurial_types::{Delta, HgNodeHash, MPath, RepoPath, NULL_HASH};
use revisionstore_types::Metadata;
use super::{DataEntry, HistoryEntry, Kind};
#[derive(Clone, Debug)]
pub struct WirePackPartSequence {
pub kind: Kind,
pub files: Vec<FileEntries>,
}
impl Arbitrary for WirePackPartSequence {
fn arbitrary(g: &mut Gen) -> Self {
let size = g.size();
let kind = if bool::arbitrary(g) {
Kind::Tree
} else {
Kind::File
};
let file_count = usize::arbitrary(g) % size;
let files = (0..file_count)
.map(|_| FileEntries::arbitrary_params(g, kind))
.collect();
Self { kind, files }
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let kind = self.kind;
Box::new(self.files.shrink().map(move |files| Self { kind, files }))
}
}
#[derive(Clone, Debug)]
pub struct FileEntries {
pub filename: RepoPath,
pub history: Vec<HistoryEntry>,
pub data: Vec<DataEntry>,
}
impl FileEntries {
fn arbitrary_params(g: &mut Gen, kind: Kind) -> Self {
let size = g.size();
let history_len = usize::arbitrary(g) % size;
let data_len = usize::arbitrary(g) % size;
let filename = match kind {
Kind::Tree => {
// 10% chance for it to be the root
if u64::arbitrary(g) % 10 == 0 {
RepoPath::root()
} else {
RepoPath::DirectoryPath(MPath::arbitrary(g))
}
}
Kind::File => RepoPath::FilePath(MPath::arbitrary(g)),
};
let history = (0..history_len)
.map(|_| HistoryEntry::arbitrary_kind(g, kind))
.collect();
let data = (0..data_len).map(|_| DataEntry::arbitrary(g)).collect();
Self {
filename,
history,
data,
}
}
}
impl Arbitrary for FileEntries {
fn arbitrary(_g: &mut Gen) -> Self {
// FileEntries depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
let filename = self.filename.clone();
let self_history = self.history.clone();
let self_data = self.data.clone();
Box::new(
(self_history, self_data)
.shrink()
.map(move |(history, data)| Self {
filename: filename.clone(),
history,
data,
}),
)
}
}
impl HistoryEntry {
pub fn arbitrary_kind(g: &mut Gen, kind: Kind) -> Self {
let copy_from = match kind {
Kind::File => {
// 20% chance of generating copy-from info
if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
Some(RepoPath::FilePath(MPath::arbitrary(g)))
} else {
None
}
}
Kind::Tree => None,
};
Self {
node: HgNodeHash::arbitrary(g),
p1: HgNodeHash::arbitrary(g),
p2: HgNodeHash::arbitrary(g),
linknode: HgNodeHash::arbitrary(g),
copy_from,
}
}
}
impl Arbitrary for HistoryEntry {
fn arbitrary(_g: &mut Gen) -> Self
|
// Not going to get anything out of shrinking this since MPath is not shrinkable.
}
impl Arbitrary for DataEntry {
fn arbitrary(g: &mut Gen) -> Self {
// 20% chance of a fulltext revision
let (delta_base, delta) = if *g.choose(&[0, 1, 2, 3, 4]).unwrap() == 0 {
(NULL_HASH, Delta::new_fulltext(Vec::arbitrary(g)))
} else {
let mut delta_base = NULL_HASH;
while delta_base == NULL_HASH {
delta_base = HgNodeHash::arbitrary(g);
}
(delta_base, Delta::arbitrary(g))
};
// 50% chance of having metadata (i.e. being v2)
let metadata = if bool::arbitrary(g) {
// 50% chance of flags being present
let flags = if bool::arbitrary(g) { Some(1) } else { None };
// 50% chance of size being present
let size = if bool::arbitrary(g) { Some(2) } else { None };
Some(Metadata { flags, size })
} else {
None
};
Self {
node: HgNodeHash::arbitrary(g),
delta_base,
delta,
metadata,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
// The delta is the only shrinkable here. However, we cannot shrink it if we don't have
// base (this might generate a non-fulltext delta).
if self.delta_base == NULL_HASH {
empty_shrinker()
} else {
let node = self.node;
let delta_base = self.delta_base;
let metadata = self.metadata;
Box::new(self.delta.shrink().map(move |delta| Self {
node,
delta_base,
delta,
metadata,
}))
}
}
}
|
{
// HistoryEntry depends on the kind of the overall wirepack, so this can't be implemented.
unimplemented!("use WirePackPartSequence::arbitrary instead")
}
|
identifier_body
|
i64.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for `i64`
#[allow(non_uppercase_statics)];
use prelude::*;
use default::Default;
#[cfg(target_word_size = "64")]
use num::CheckedMul;
use num::{BitCount, CheckedAdd, CheckedSub};
use num::{CheckedDiv, Zero, One, strconv};
use num::{ToStrRadix, FromStrRadix};
use option::{Option, Some, None};
use str;
use unstable::intrinsics;
int_module!(i64, 64)
impl BitCount for i64 {
/// Counts the number of bits set. Wraps LLVM's `ctpop` intrinsic.
#[inline]
fn population_count(&self) -> i64 { unsafe { intrinsics::ctpop64(*self) } }
/// Counts the number of leading zeros. Wraps LLVM's `ctlz` intrinsic.
#[inline]
fn leading_zeros(&self) -> i64 { unsafe { intrinsics::ctlz64(*self) } }
/// Counts the number of trailing zeros. Wraps LLVM's `cttz` intrinsic.
#[inline]
fn trailing_zeros(&self) -> i64 { unsafe { intrinsics::cttz64(*self) } }
}
impl CheckedAdd for i64 {
#[inline]
fn checked_add(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_add_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
|
impl CheckedSub for i64 {
#[inline]
fn checked_sub(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_sub_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
// FIXME: #8449: should not be disabled on 32-bit
#[cfg(target_word_size = "64")]
impl CheckedMul for i64 {
#[inline]
fn checked_mul(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_mul_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
|
}
}
|
random_line_split
|
i64.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for `i64`
#[allow(non_uppercase_statics)];
use prelude::*;
use default::Default;
#[cfg(target_word_size = "64")]
use num::CheckedMul;
use num::{BitCount, CheckedAdd, CheckedSub};
use num::{CheckedDiv, Zero, One, strconv};
use num::{ToStrRadix, FromStrRadix};
use option::{Option, Some, None};
use str;
use unstable::intrinsics;
int_module!(i64, 64)
impl BitCount for i64 {
/// Counts the number of bits set. Wraps LLVM's `ctpop` intrinsic.
#[inline]
fn population_count(&self) -> i64 { unsafe { intrinsics::ctpop64(*self) } }
/// Counts the number of leading zeros. Wraps LLVM's `ctlz` intrinsic.
#[inline]
fn leading_zeros(&self) -> i64 { unsafe { intrinsics::ctlz64(*self) } }
/// Counts the number of trailing zeros. Wraps LLVM's `cttz` intrinsic.
#[inline]
fn trailing_zeros(&self) -> i64 { unsafe { intrinsics::cttz64(*self) } }
}
impl CheckedAdd for i64 {
#[inline]
fn checked_add(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_add_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
impl CheckedSub for i64 {
#[inline]
fn
|
(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_sub_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
// FIXME: #8449: should not be disabled on 32-bit
#[cfg(target_word_size = "64")]
impl CheckedMul for i64 {
#[inline]
fn checked_mul(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_mul_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
|
checked_sub
|
identifier_name
|
i64.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for `i64`
#[allow(non_uppercase_statics)];
use prelude::*;
use default::Default;
#[cfg(target_word_size = "64")]
use num::CheckedMul;
use num::{BitCount, CheckedAdd, CheckedSub};
use num::{CheckedDiv, Zero, One, strconv};
use num::{ToStrRadix, FromStrRadix};
use option::{Option, Some, None};
use str;
use unstable::intrinsics;
int_module!(i64, 64)
impl BitCount for i64 {
/// Counts the number of bits set. Wraps LLVM's `ctpop` intrinsic.
#[inline]
fn population_count(&self) -> i64 { unsafe { intrinsics::ctpop64(*self) } }
/// Counts the number of leading zeros. Wraps LLVM's `ctlz` intrinsic.
#[inline]
fn leading_zeros(&self) -> i64 { unsafe { intrinsics::ctlz64(*self) } }
/// Counts the number of trailing zeros. Wraps LLVM's `cttz` intrinsic.
#[inline]
fn trailing_zeros(&self) -> i64 { unsafe { intrinsics::cttz64(*self) } }
}
impl CheckedAdd for i64 {
#[inline]
fn checked_add(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_add_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
impl CheckedSub for i64 {
#[inline]
fn checked_sub(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_sub_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
// FIXME: #8449: should not be disabled on 32-bit
#[cfg(target_word_size = "64")]
impl CheckedMul for i64 {
#[inline]
fn checked_mul(&self, v: &i64) -> Option<i64>
|
}
|
{
unsafe {
let (x, y) = intrinsics::i64_mul_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
|
identifier_body
|
i64.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for `i64`
#[allow(non_uppercase_statics)];
use prelude::*;
use default::Default;
#[cfg(target_word_size = "64")]
use num::CheckedMul;
use num::{BitCount, CheckedAdd, CheckedSub};
use num::{CheckedDiv, Zero, One, strconv};
use num::{ToStrRadix, FromStrRadix};
use option::{Option, Some, None};
use str;
use unstable::intrinsics;
int_module!(i64, 64)
impl BitCount for i64 {
/// Counts the number of bits set. Wraps LLVM's `ctpop` intrinsic.
#[inline]
fn population_count(&self) -> i64 { unsafe { intrinsics::ctpop64(*self) } }
/// Counts the number of leading zeros. Wraps LLVM's `ctlz` intrinsic.
#[inline]
fn leading_zeros(&self) -> i64 { unsafe { intrinsics::ctlz64(*self) } }
/// Counts the number of trailing zeros. Wraps LLVM's `cttz` intrinsic.
#[inline]
fn trailing_zeros(&self) -> i64 { unsafe { intrinsics::cttz64(*self) } }
}
impl CheckedAdd for i64 {
#[inline]
fn checked_add(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_add_with_overflow(*self, *v);
if y
|
else { Some(x) }
}
}
}
impl CheckedSub for i64 {
#[inline]
fn checked_sub(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_sub_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
// FIXME: #8449: should not be disabled on 32-bit
#[cfg(target_word_size = "64")]
impl CheckedMul for i64 {
#[inline]
fn checked_mul(&self, v: &i64) -> Option<i64> {
unsafe {
let (x, y) = intrinsics::i64_mul_with_overflow(*self, *v);
if y { None } else { Some(x) }
}
}
}
|
{ None }
|
conditional_block
|
test_main.rs
|
#![feature(test)]
#![feature(conservative_impl_trait)]
#![feature(specialization)]
#[macro_use]
extern crate ndarray;
extern crate test;
extern crate rand;
extern crate ndarray_rand;
extern crate num;
extern crate chrono;
extern crate error_chain;
extern crate itertools;
extern crate rustc_serialize;
extern crate csv;
pub mod adapters;
pub mod dataframe;
pub mod tests;
#[macro_use]
pub mod util;
pub mod implement;
pub mod mixedtypes;
pub mod bench;
use ndarray::{arr1, arr2};
use dataframe::*;
use util::types::*;
use std::f64::NAN;
use util::traits::*;
use util::error::*;
use ndarray::{Axis, ArrayView};
use ndarray::stack;
use util::readcsv::*;
use mixedtypes::*;
use std::collections::BTreeMap;
fn
|
() {
let mut btree: BTreeMap<String, Row<f64>> = BTreeMap::new();
btree.insert("a".to_string(), arr1(&[2., 3., 2.]));
if let Err(ref e) = run() {
use ::std::io::Write;
let stderr = &mut ::std::io::stderr();
let errmsg = "Error writing to stderr";
writeln!(stderr, "error: {}", e).expect(errmsg);
for e in e.iter().skip(1) {
writeln!(stderr, "caused by: {}", e).expect(errmsg);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg);
}
::std::process::exit(1);
}
}
fn run() -> Result<()> {
let df: Result<DataFrame<InnerType, OuterType>> = DataFrame::read_csv("/Users/suchin/Github/rust-dataframe/src/tests/test.\
csv");
let a = arr2(&[[2., 7.], [3., NAN], [2., 4.]]);
// let b = arr2(&[[2., 6.], [3., 4.]]);
let c = arr2(&[[2., 6.], [3., 4.], [2., 1.]]);
let mut df: DataFrame<f64, String> = DataFrame::new(a).columns(&["a", "b"])?
.index(&["1", "2", "3"])?;
let df_1 = DataFrame::new(c).columns(&["c", "d"])?.index(&["1", "2", "3"])?;
let new_data = df.select(&["2"], UtahAxis::Row).as_array()?;
// let df_iter: DataFrameIterator = df_1.df_iter(UtahAxis::Row);
let j = df.df_iter(UtahAxis::Row)
.remove(&["1"])
.select(&["2"])
.append("8", new_data.view())
.sumdf()
.as_df()?;
let res: DataFrame<f64, String> = df.impute(ImputeStrategy::Mean, UtahAxis::Column)
.as_df()?;
let res_1: DataFrame<f64, String> = df.inner_left_join(&df_1).as_df()?;
let concat = df.concat(&df_1, UtahAxis::Row).as_df();
// let b = arr1(&[2., 3., 2.]);
let k: DataFrame<f64, String> = dataframe!(
{
"a" => col!([2., 3., 2.]),
"b" => col!([2., NAN, 2.])
});
println!("{:?}", k);
Ok(())
}
|
main
|
identifier_name
|
test_main.rs
|
#![feature(test)]
#![feature(conservative_impl_trait)]
#![feature(specialization)]
#[macro_use]
extern crate ndarray;
extern crate test;
extern crate rand;
extern crate ndarray_rand;
extern crate num;
extern crate chrono;
extern crate error_chain;
extern crate itertools;
extern crate rustc_serialize;
extern crate csv;
pub mod adapters;
pub mod dataframe;
pub mod tests;
#[macro_use]
pub mod util;
pub mod implement;
pub mod mixedtypes;
pub mod bench;
use ndarray::{arr1, arr2};
use dataframe::*;
use util::types::*;
use std::f64::NAN;
use util::traits::*;
use util::error::*;
use ndarray::{Axis, ArrayView};
use ndarray::stack;
use util::readcsv::*;
use mixedtypes::*;
use std::collections::BTreeMap;
fn main() {
let mut btree: BTreeMap<String, Row<f64>> = BTreeMap::new();
btree.insert("a".to_string(), arr1(&[2., 3., 2.]));
if let Err(ref e) = run()
|
}
fn run() -> Result<()> {
let df: Result<DataFrame<InnerType, OuterType>> = DataFrame::read_csv("/Users/suchin/Github/rust-dataframe/src/tests/test.\
csv");
let a = arr2(&[[2., 7.], [3., NAN], [2., 4.]]);
// let b = arr2(&[[2., 6.], [3., 4.]]);
let c = arr2(&[[2., 6.], [3., 4.], [2., 1.]]);
let mut df: DataFrame<f64, String> = DataFrame::new(a).columns(&["a", "b"])?
.index(&["1", "2", "3"])?;
let df_1 = DataFrame::new(c).columns(&["c", "d"])?.index(&["1", "2", "3"])?;
let new_data = df.select(&["2"], UtahAxis::Row).as_array()?;
// let df_iter: DataFrameIterator = df_1.df_iter(UtahAxis::Row);
let j = df.df_iter(UtahAxis::Row)
.remove(&["1"])
.select(&["2"])
.append("8", new_data.view())
.sumdf()
.as_df()?;
let res: DataFrame<f64, String> = df.impute(ImputeStrategy::Mean, UtahAxis::Column)
.as_df()?;
let res_1: DataFrame<f64, String> = df.inner_left_join(&df_1).as_df()?;
let concat = df.concat(&df_1, UtahAxis::Row).as_df();
// let b = arr1(&[2., 3., 2.]);
let k: DataFrame<f64, String> = dataframe!(
{
"a" => col!([2., 3., 2.]),
"b" => col!([2., NAN, 2.])
});
println!("{:?}", k);
Ok(())
}
|
{
use ::std::io::Write;
let stderr = &mut ::std::io::stderr();
let errmsg = "Error writing to stderr";
writeln!(stderr, "error: {}", e).expect(errmsg);
for e in e.iter().skip(1) {
writeln!(stderr, "caused by: {}", e).expect(errmsg);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg);
}
::std::process::exit(1);
}
|
conditional_block
|
test_main.rs
|
#![feature(test)]
#![feature(conservative_impl_trait)]
#![feature(specialization)]
#[macro_use]
extern crate ndarray;
extern crate test;
extern crate rand;
extern crate ndarray_rand;
extern crate num;
extern crate chrono;
extern crate error_chain;
extern crate itertools;
extern crate rustc_serialize;
extern crate csv;
pub mod adapters;
pub mod dataframe;
pub mod tests;
#[macro_use]
pub mod util;
pub mod implement;
pub mod mixedtypes;
pub mod bench;
use ndarray::{arr1, arr2};
use dataframe::*;
use util::types::*;
use std::f64::NAN;
use util::traits::*;
use util::error::*;
use ndarray::{Axis, ArrayView};
use ndarray::stack;
use util::readcsv::*;
use mixedtypes::*;
use std::collections::BTreeMap;
fn main() {
let mut btree: BTreeMap<String, Row<f64>> = BTreeMap::new();
btree.insert("a".to_string(), arr1(&[2., 3., 2.]));
if let Err(ref e) = run() {
use ::std::io::Write;
let stderr = &mut ::std::io::stderr();
let errmsg = "Error writing to stderr";
writeln!(stderr, "error: {}", e).expect(errmsg);
for e in e.iter().skip(1) {
writeln!(stderr, "caused by: {}", e).expect(errmsg);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg);
}
::std::process::exit(1);
}
}
fn run() -> Result<()>
|
.as_df()?;
let res_1: DataFrame<f64, String> = df.inner_left_join(&df_1).as_df()?;
let concat = df.concat(&df_1, UtahAxis::Row).as_df();
// let b = arr1(&[2., 3., 2.]);
let k: DataFrame<f64, String> = dataframe!(
{
"a" => col!([2., 3., 2.]),
"b" => col!([2., NAN, 2.])
});
println!("{:?}", k);
Ok(())
}
|
{
let df: Result<DataFrame<InnerType, OuterType>> = DataFrame::read_csv("/Users/suchin/Github/rust-dataframe/src/tests/test.\
csv");
let a = arr2(&[[2., 7.], [3., NAN], [2., 4.]]);
// let b = arr2(&[[2., 6.], [3., 4.]]);
let c = arr2(&[[2., 6.], [3., 4.], [2., 1.]]);
let mut df: DataFrame<f64, String> = DataFrame::new(a).columns(&["a", "b"])?
.index(&["1", "2", "3"])?;
let df_1 = DataFrame::new(c).columns(&["c", "d"])?.index(&["1", "2", "3"])?;
let new_data = df.select(&["2"], UtahAxis::Row).as_array()?;
// let df_iter: DataFrameIterator = df_1.df_iter(UtahAxis::Row);
let j = df.df_iter(UtahAxis::Row)
.remove(&["1"])
.select(&["2"])
.append("8", new_data.view())
.sumdf()
.as_df()?;
let res: DataFrame<f64, String> = df.impute(ImputeStrategy::Mean, UtahAxis::Column)
|
identifier_body
|
test_main.rs
|
#![feature(test)]
#![feature(conservative_impl_trait)]
#![feature(specialization)]
#[macro_use]
extern crate ndarray;
extern crate test;
extern crate rand;
extern crate ndarray_rand;
extern crate num;
extern crate chrono;
extern crate error_chain;
extern crate itertools;
extern crate rustc_serialize;
extern crate csv;
pub mod adapters;
pub mod dataframe;
pub mod tests;
#[macro_use]
pub mod util;
pub mod implement;
pub mod mixedtypes;
pub mod bench;
use ndarray::{arr1, arr2};
use dataframe::*;
use util::types::*;
use std::f64::NAN;
use util::traits::*;
use util::error::*;
use ndarray::{Axis, ArrayView};
use ndarray::stack;
use util::readcsv::*;
use mixedtypes::*;
use std::collections::BTreeMap;
fn main() {
let mut btree: BTreeMap<String, Row<f64>> = BTreeMap::new();
btree.insert("a".to_string(), arr1(&[2., 3., 2.]));
if let Err(ref e) = run() {
use ::std::io::Write;
let stderr = &mut ::std::io::stderr();
let errmsg = "Error writing to stderr";
writeln!(stderr, "error: {}", e).expect(errmsg);
for e in e.iter().skip(1) {
writeln!(stderr, "caused by: {}", e).expect(errmsg);
}
// The backtrace is not always generated. Try to run this example
// with `RUST_BACKTRACE=1`.
if let Some(backtrace) = e.backtrace() {
writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg);
}
::std::process::exit(1);
}
}
fn run() -> Result<()> {
let df: Result<DataFrame<InnerType, OuterType>> = DataFrame::read_csv("/Users/suchin/Github/rust-dataframe/src/tests/test.\
csv");
|
let mut df: DataFrame<f64, String> = DataFrame::new(a).columns(&["a", "b"])?
.index(&["1", "2", "3"])?;
let df_1 = DataFrame::new(c).columns(&["c", "d"])?.index(&["1", "2", "3"])?;
let new_data = df.select(&["2"], UtahAxis::Row).as_array()?;
// let df_iter: DataFrameIterator = df_1.df_iter(UtahAxis::Row);
let j = df.df_iter(UtahAxis::Row)
.remove(&["1"])
.select(&["2"])
.append("8", new_data.view())
.sumdf()
.as_df()?;
let res: DataFrame<f64, String> = df.impute(ImputeStrategy::Mean, UtahAxis::Column)
.as_df()?;
let res_1: DataFrame<f64, String> = df.inner_left_join(&df_1).as_df()?;
let concat = df.concat(&df_1, UtahAxis::Row).as_df();
// let b = arr1(&[2., 3., 2.]);
let k: DataFrame<f64, String> = dataframe!(
{
"a" => col!([2., 3., 2.]),
"b" => col!([2., NAN, 2.])
});
println!("{:?}", k);
Ok(())
}
|
let a = arr2(&[[2., 7.], [3., NAN], [2., 4.]]);
// let b = arr2(&[[2., 6.], [3., 4.]]);
let c = arr2(&[[2., 6.], [3., 4.], [2., 1.]]);
|
random_line_split
|
logname.rs
|
extern crate libc;
extern crate getopts;
extern crate coreutils;
use libc::funcs::posix88::unistd::getlogin;
use std::c_str::CString;
use std::os;
fn
|
() {
let args = os::args();
let program = args[0].clone();
let opts = &[
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => { m },
Err(f) => {
println!("{}", f.to_string());
println!("-------------------------------");
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
return;
},
};
if matches.opt_present("help") {
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
return;
}
if matches.opt_present("version") {
println!("{}: {}", program, coreutils::version());
return;
}
let login = unsafe {
CString::new(
getlogin() as *const libc::c_char,
false
)
};
match login.as_str() {
Some(username) => println!("{}", username),
None => os::set_exit_status(1),
}
}
|
main
|
identifier_name
|
logname.rs
|
extern crate libc;
extern crate getopts;
extern crate coreutils;
use libc::funcs::posix88::unistd::getlogin;
use std::c_str::CString;
use std::os;
fn main()
|
if matches.opt_present("help") {
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
return;
}
if matches.opt_present("version") {
println!("{}: {}", program, coreutils::version());
return;
}
let login = unsafe {
CString::new(
getlogin() as *const libc::c_char,
false
)
};
match login.as_str() {
Some(username) => println!("{}", username),
None => os::set_exit_status(1),
}
}
|
{
let args = os::args();
let program = args[0].clone();
let opts = &[
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => { m },
Err(f) => {
println!("{}", f.to_string());
println!("-------------------------------");
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
return;
},
};
|
identifier_body
|
logname.rs
|
extern crate libc;
extern crate getopts;
extern crate coreutils;
use libc::funcs::posix88::unistd::getlogin;
use std::c_str::CString;
use std::os;
fn main() {
let args = os::args();
let program = args[0].clone();
let opts = &[
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => { m },
Err(f) => {
println!("{}", f.to_string());
println!("-------------------------------");
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
return;
},
|
return;
}
if matches.opt_present("version") {
println!("{}: {}", program, coreutils::version());
return;
}
let login = unsafe {
CString::new(
getlogin() as *const libc::c_char,
false
)
};
match login.as_str() {
Some(username) => println!("{}", username),
None => os::set_exit_status(1),
}
}
|
};
if matches.opt_present("help") {
println!("Usage: {}", getopts::usage(program.as_slice(), opts));
|
random_line_split
|
args.rs
|
use clap::{arg_enum, Clap};
arg_enum! {
#[derive(Debug)]
pub enum RenderDevice {
Cpu,
Gpu
}
}
impl RenderDevice {
pub fn to_str(&self) -> &str
|
}
#[clap(version = env!("CARGO_PKG_VERSION"))]
#[derive(Clap, Debug)]
pub struct Args {
/// Which address to listen on.
#[clap(default_value = "0.0.0.0", long = "address", short = "a")]
pub address: String,
/// By default elmyra uses a bundled version of blender (strongly recommended), you can override it with this if you know what you're doing or if would like to play and learn.
#[clap(long = "blender-path")]
pub blender_path: Option<String>,
/// The directory to store visualization files and rendered material in, by default elmyra's runtime directory (= the one where the executable and bundled resources are located) is used.
#[clap(long = "data-dir")]
pub data_dir: Option<String>,
/// By default elmyra runs a renderer process in the background, this option disables it.
#[clap(long = "disable-rendering")]
pub disable_rendering: bool,
/// By default elmyra uses a bundled version of ffmpeg (strongly recommended), you can override it with this if you know what you're doing or if you would like to play and learn.
#[clap(long = "ffmpeg-path")]
pub ffmpeg_path: Option<String>,
/// Which port to listen on.
#[clap(default_value = "8080", short = "p", long = "port")]
pub port: u16,
/// Customize which computing device the renderer should use, that is: CPU or GPU
#[clap(case_insensitive = true, default_value = "CPU", long = "render-device", possible_values = &RenderDevice::variants())]
pub render_device: RenderDevice,
/// Customize how many seconds the renderer should spend on each visualization (they are rendered in turns) - note that this is a mininum suggestion: if a single rendering action takes longer than the target time, the renderer only moves to the next visualization when the action has completed.
#[clap(default_value = "60", long = "render-target-time")]
pub render_target_time: usize
}
|
{
match self {
Self::Cpu => "CPU",
Self::Gpu => "GPU"
}
}
|
identifier_body
|
args.rs
|
use clap::{arg_enum, Clap};
arg_enum! {
#[derive(Debug)]
pub enum RenderDevice {
Cpu,
Gpu
}
}
impl RenderDevice {
pub fn to_str(&self) -> &str {
match self {
Self::Cpu => "CPU",
Self::Gpu => "GPU"
}
}
}
#[clap(version = env!("CARGO_PKG_VERSION"))]
#[derive(Clap, Debug)]
pub struct
|
{
/// Which address to listen on.
#[clap(default_value = "0.0.0.0", long = "address", short = "a")]
pub address: String,
/// By default elmyra uses a bundled version of blender (strongly recommended), you can override it with this if you know what you're doing or if would like to play and learn.
#[clap(long = "blender-path")]
pub blender_path: Option<String>,
/// The directory to store visualization files and rendered material in, by default elmyra's runtime directory (= the one where the executable and bundled resources are located) is used.
#[clap(long = "data-dir")]
pub data_dir: Option<String>,
/// By default elmyra runs a renderer process in the background, this option disables it.
#[clap(long = "disable-rendering")]
pub disable_rendering: bool,
/// By default elmyra uses a bundled version of ffmpeg (strongly recommended), you can override it with this if you know what you're doing or if you would like to play and learn.
#[clap(long = "ffmpeg-path")]
pub ffmpeg_path: Option<String>,
/// Which port to listen on.
#[clap(default_value = "8080", short = "p", long = "port")]
pub port: u16,
/// Customize which computing device the renderer should use, that is: CPU or GPU
#[clap(case_insensitive = true, default_value = "CPU", long = "render-device", possible_values = &RenderDevice::variants())]
pub render_device: RenderDevice,
/// Customize how many seconds the renderer should spend on each visualization (they are rendered in turns) - note that this is a mininum suggestion: if a single rendering action takes longer than the target time, the renderer only moves to the next visualization when the action has completed.
#[clap(default_value = "60", long = "render-target-time")]
pub render_target_time: usize
}
|
Args
|
identifier_name
|
args.rs
|
use clap::{arg_enum, Clap};
arg_enum! {
#[derive(Debug)]
pub enum RenderDevice {
Cpu,
Gpu
}
}
impl RenderDevice {
pub fn to_str(&self) -> &str {
match self {
Self::Cpu => "CPU",
Self::Gpu => "GPU"
}
}
}
#[clap(version = env!("CARGO_PKG_VERSION"))]
#[derive(Clap, Debug)]
pub struct Args {
/// Which address to listen on.
#[clap(default_value = "0.0.0.0", long = "address", short = "a")]
pub address: String,
/// By default elmyra uses a bundled version of blender (strongly recommended), you can override it with this if you know what you're doing or if would like to play and learn.
#[clap(long = "blender-path")]
pub blender_path: Option<String>,
/// The directory to store visualization files and rendered material in, by default elmyra's runtime directory (= the one where the executable and bundled resources are located) is used.
#[clap(long = "data-dir")]
pub data_dir: Option<String>,
/// By default elmyra runs a renderer process in the background, this option disables it.
#[clap(long = "disable-rendering")]
pub disable_rendering: bool,
/// By default elmyra uses a bundled version of ffmpeg (strongly recommended), you can override it with this if you know what you're doing or if you would like to play and learn.
#[clap(long = "ffmpeg-path")]
pub ffmpeg_path: Option<String>,
/// Which port to listen on.
#[clap(default_value = "8080", short = "p", long = "port")]
pub port: u16,
/// Customize which computing device the renderer should use, that is: CPU or GPU
#[clap(case_insensitive = true, default_value = "CPU", long = "render-device", possible_values = &RenderDevice::variants())]
pub render_device: RenderDevice,
/// Customize how many seconds the renderer should spend on each visualization (they are rendered in turns) - note that this is a mininum suggestion: if a single rendering action takes longer than the target time, the renderer only moves to the next visualization when the action has completed.
#[clap(default_value = "60", long = "render-target-time")]
pub render_target_time: usize
|
}
|
random_line_split
|
|
issue-14308.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
A(..) => 1i
};
assert_eq!(x, 1);
let x = match A(4) {
A(1) => 1i,
A(..) => 2i
};
assert_eq!(x, 2);
// This next test uses a (..) wildcard match on a nullary struct.
// There's no particularly good reason to support this, but it's currently allowed,
// and this makes sure it doesn't ICE or break LLVM.
let x = match B {
B(..) => 3i
};
assert_eq!(x, 3);
}
|
struct A(int);
struct B;
fn main() {
let x = match A(3) {
|
random_line_split
|
issue-14308.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct A(int);
struct B;
fn main()
|
{
let x = match A(3) {
A(..) => 1i
};
assert_eq!(x, 1);
let x = match A(4) {
A(1) => 1i,
A(..) => 2i
};
assert_eq!(x, 2);
// This next test uses a (..) wildcard match on a nullary struct.
// There's no particularly good reason to support this, but it's currently allowed,
// and this makes sure it doesn't ICE or break LLVM.
let x = match B {
B(..) => 3i
};
assert_eq!(x, 3);
}
|
identifier_body
|
|
issue-14308.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct A(int);
struct
|
;
fn main() {
let x = match A(3) {
A(..) => 1i
};
assert_eq!(x, 1);
let x = match A(4) {
A(1) => 1i,
A(..) => 2i
};
assert_eq!(x, 2);
// This next test uses a (..) wildcard match on a nullary struct.
// There's no particularly good reason to support this, but it's currently allowed,
// and this makes sure it doesn't ICE or break LLVM.
let x = match B {
B(..) => 3i
};
assert_eq!(x, 3);
}
|
B
|
identifier_name
|
event_content.rs
|
//! Implementations of the MessageEventContent and StateEventContent derive macro.
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
DeriveInput, LitStr, Token,
};
/// Parses attributes for `*EventContent` derives.
///
/// `#[ruma_event(type = "m.room.alias")]`
enum EventMeta {
/// Variant holds the "m.whatever" event type.
Type(LitStr),
}
impl Parse for EventMeta {
fn
|
(input: ParseStream) -> syn::Result<Self> {
input.parse::<Token![type]>()?;
input.parse::<Token![=]>()?;
Ok(EventMeta::Type(input.parse::<LitStr>()?))
}
}
/// Create an `EventContent` implementation for a struct.
pub fn expand_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = &input.ident;
let event_type_attr = input
.attrs
.iter()
.find(|attr| attr.path.is_ident("ruma_event"))
.ok_or_else(|| {
let msg = "no event type attribute found, \
add `#[ruma_event(type = \"any.room.event\")]` \
below the event content derive";
syn::Error::new(Span::call_site(), msg)
})?;
let event_type = {
let event_meta = event_type_attr.parse_args::<EventMeta>()?;
let EventMeta::Type(lit) = event_meta;
lit
};
Ok(quote! {
impl ::ruma_events::EventContent for #ident {
fn event_type(&self) -> &str {
#event_type
}
fn from_parts(
ev_type: &str,
content: Box<::serde_json::value::RawValue>
) -> Result<Self, String> {
if ev_type!= #event_type {
return Err(format!("expected `{}` found {}", #event_type, ev_type));
}
::serde_json::from_str(content.get()).map_err(|e| e.to_string())
}
}
})
}
/// Create a `BasicEventContent` implementation for a struct
pub fn expand_basic_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::BasicEventContent for #ident { }
})
}
/// Create a `EphemeralRoomEventContent` implementation for a struct
pub fn expand_ephemeral_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::EphemeralRoomEventContent for #ident { }
})
}
/// Create a `RoomEventContent` implementation for a struct.
pub fn expand_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::RoomEventContent for #ident { }
})
}
/// Create a `MessageEventContent` implementation for a struct
pub fn expand_message_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::MessageEventContent for #ident { }
})
}
/// Create a `StateEventContent` implementation for a struct
pub fn expand_state_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::StateEventContent for #ident { }
})
}
|
parse
|
identifier_name
|
event_content.rs
|
//! Implementations of the MessageEventContent and StateEventContent derive macro.
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
DeriveInput, LitStr, Token,
};
/// Parses attributes for `*EventContent` derives.
///
/// `#[ruma_event(type = "m.room.alias")]`
enum EventMeta {
/// Variant holds the "m.whatever" event type.
Type(LitStr),
}
impl Parse for EventMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
input.parse::<Token![type]>()?;
input.parse::<Token![=]>()?;
Ok(EventMeta::Type(input.parse::<LitStr>()?))
}
}
/// Create an `EventContent` implementation for a struct.
pub fn expand_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = &input.ident;
let event_type_attr = input
.attrs
.iter()
.find(|attr| attr.path.is_ident("ruma_event"))
.ok_or_else(|| {
let msg = "no event type attribute found, \
add `#[ruma_event(type = \"any.room.event\")]` \
below the event content derive";
|
let event_type = {
let event_meta = event_type_attr.parse_args::<EventMeta>()?;
let EventMeta::Type(lit) = event_meta;
lit
};
Ok(quote! {
impl ::ruma_events::EventContent for #ident {
fn event_type(&self) -> &str {
#event_type
}
fn from_parts(
ev_type: &str,
content: Box<::serde_json::value::RawValue>
) -> Result<Self, String> {
if ev_type!= #event_type {
return Err(format!("expected `{}` found {}", #event_type, ev_type));
}
::serde_json::from_str(content.get()).map_err(|e| e.to_string())
}
}
})
}
/// Create a `BasicEventContent` implementation for a struct
pub fn expand_basic_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::BasicEventContent for #ident { }
})
}
/// Create a `EphemeralRoomEventContent` implementation for a struct
pub fn expand_ephemeral_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::EphemeralRoomEventContent for #ident { }
})
}
/// Create a `RoomEventContent` implementation for a struct.
pub fn expand_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::RoomEventContent for #ident { }
})
}
/// Create a `MessageEventContent` implementation for a struct
pub fn expand_message_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::MessageEventContent for #ident { }
})
}
/// Create a `StateEventContent` implementation for a struct
pub fn expand_state_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::StateEventContent for #ident { }
})
}
|
syn::Error::new(Span::call_site(), msg)
})?;
|
random_line_split
|
event_content.rs
|
//! Implementations of the MessageEventContent and StateEventContent derive macro.
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
DeriveInput, LitStr, Token,
};
/// Parses attributes for `*EventContent` derives.
///
/// `#[ruma_event(type = "m.room.alias")]`
enum EventMeta {
/// Variant holds the "m.whatever" event type.
Type(LitStr),
}
impl Parse for EventMeta {
fn parse(input: ParseStream) -> syn::Result<Self> {
input.parse::<Token![type]>()?;
input.parse::<Token![=]>()?;
Ok(EventMeta::Type(input.parse::<LitStr>()?))
}
}
/// Create an `EventContent` implementation for a struct.
pub fn expand_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = &input.ident;
let event_type_attr = input
.attrs
.iter()
.find(|attr| attr.path.is_ident("ruma_event"))
.ok_or_else(|| {
let msg = "no event type attribute found, \
add `#[ruma_event(type = \"any.room.event\")]` \
below the event content derive";
syn::Error::new(Span::call_site(), msg)
})?;
let event_type = {
let event_meta = event_type_attr.parse_args::<EventMeta>()?;
let EventMeta::Type(lit) = event_meta;
lit
};
Ok(quote! {
impl ::ruma_events::EventContent for #ident {
fn event_type(&self) -> &str {
#event_type
}
fn from_parts(
ev_type: &str,
content: Box<::serde_json::value::RawValue>
) -> Result<Self, String> {
if ev_type!= #event_type {
return Err(format!("expected `{}` found {}", #event_type, ev_type));
}
::serde_json::from_str(content.get()).map_err(|e| e.to_string())
}
}
})
}
/// Create a `BasicEventContent` implementation for a struct
pub fn expand_basic_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::BasicEventContent for #ident { }
})
}
/// Create a `EphemeralRoomEventContent` implementation for a struct
pub fn expand_ephemeral_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::EphemeralRoomEventContent for #ident { }
})
}
/// Create a `RoomEventContent` implementation for a struct.
pub fn expand_room_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let event_content_impl = expand_event_content(input)?;
Ok(quote! {
#event_content_impl
impl ::ruma_events::RoomEventContent for #ident { }
})
}
/// Create a `MessageEventContent` implementation for a struct
pub fn expand_message_event_content(input: DeriveInput) -> syn::Result<TokenStream> {
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::MessageEventContent for #ident { }
})
}
/// Create a `StateEventContent` implementation for a struct
pub fn expand_state_event_content(input: DeriveInput) -> syn::Result<TokenStream>
|
{
let ident = input.ident.clone();
let room_ev_content = expand_room_event_content(input)?;
Ok(quote! {
#room_ev_content
impl ::ruma_events::StateEventContent for #ident { }
})
}
|
identifier_body
|
|
server.rs
|
use mio::{Handler, Token, EventLoop, EventSet, PollOpt};
use mio::tcp::TcpListener;
use mio::util::Slab;
use std::net::SocketAddr;
use connection::Connection;
use error::Result;
const SERVER: Token = Token(0);
struct Server {
listener: TcpListener,
connections: Slab<Connection>,
}
impl Server {
fn new(listener: TcpListener) -> Self {
let slab = Slab::new_starting_at(Token(1), 1024);
Server {
listener: listener,
connections: slab,
}
}
pub fn run(addr: SocketAddr) -> Result<()> {
let listener = try!(TcpListener::bind(&addr));
let mut event_loop = try!(EventLoop::new());
try!(event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge()));
let mut server = Self::new(listener);
event_loop.run(&mut server);
Ok(())
}
fn accept_new(&mut self, event_loop: &mut EventLoop<Server>) {
match self.listener.accept() {
Ok(Some((socket, addr))) => {
info!("New Connection from {}", addr);
let token = self.connections
.insert_with(|token| Connection::new(socket, token))
.unwrap();
event_loop.register(&self.connections[token].socket,
token,
EventSet::readable(), // TODO hup?
PollOpt::edge())
.unwrap();
|
}
}
}
}
impl Handler for Server {
type Timeout = ();
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) {
match token {
SERVER => self.accept_new(event_loop),
_ => {
if events.is_readable() {
self.connections[token].read()
}
if events.is_writable() {
self.connections[token].write()
}
if events.is_hup() {}
if events.is_error() {}
if self.connections[token].is_closed() {
event_loop.deregister(&self.connections[token].socket);
let _ = self.connections.remove(token);
}
}
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<Server>, timeout: Self::Timeout) {}
}
#[cfg(test)]
mod test {
use std::io::{BufRead, BufReader, Read, Write};
use std::net::TcpStream;
extern crate env_logger;
const HOST: &'static str = "127.0.0.1:60254";
fn start_server() {
use std::thread;
use std::time::Duration;
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
thread::spawn(|| {
info!("running server");
super::Server::run(HOST.parse().unwrap()).unwrap();
});
thread::sleep(Duration::from_millis(1000));
});
println!("running");
}
#[test]
fn test_server() {
let _ = env_logger::init();
start_server();
let mut sock = BufReader::new(TcpStream::connect(HOST).unwrap());
let mut recv = String::new();
sock.get_mut().write_all(b"hello world\n").unwrap();
// assert_eq!(recv, "hello world\n");
recv.clear();
sock.get_mut().write_all(b"this is a line\n").unwrap();
// assert_eq!(recv, "this is a line\n")
}
}
|
}
Ok(None) => {}
Err(e) => {
// TODO handle
event_loop.shutdown();
|
random_line_split
|
server.rs
|
use mio::{Handler, Token, EventLoop, EventSet, PollOpt};
use mio::tcp::TcpListener;
use mio::util::Slab;
use std::net::SocketAddr;
use connection::Connection;
use error::Result;
const SERVER: Token = Token(0);
struct Server {
listener: TcpListener,
connections: Slab<Connection>,
}
impl Server {
fn new(listener: TcpListener) -> Self {
let slab = Slab::new_starting_at(Token(1), 1024);
Server {
listener: listener,
connections: slab,
}
}
pub fn run(addr: SocketAddr) -> Result<()> {
let listener = try!(TcpListener::bind(&addr));
let mut event_loop = try!(EventLoop::new());
try!(event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge()));
let mut server = Self::new(listener);
event_loop.run(&mut server);
Ok(())
}
fn accept_new(&mut self, event_loop: &mut EventLoop<Server>)
|
}
impl Handler for Server {
type Timeout = ();
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) {
match token {
SERVER => self.accept_new(event_loop),
_ => {
if events.is_readable() {
self.connections[token].read()
}
if events.is_writable() {
self.connections[token].write()
}
if events.is_hup() {}
if events.is_error() {}
if self.connections[token].is_closed() {
event_loop.deregister(&self.connections[token].socket);
let _ = self.connections.remove(token);
}
}
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<Server>, timeout: Self::Timeout) {}
}
#[cfg(test)]
mod test {
use std::io::{BufRead, BufReader, Read, Write};
use std::net::TcpStream;
extern crate env_logger;
const HOST: &'static str = "127.0.0.1:60254";
fn start_server() {
use std::thread;
use std::time::Duration;
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
thread::spawn(|| {
info!("running server");
super::Server::run(HOST.parse().unwrap()).unwrap();
});
thread::sleep(Duration::from_millis(1000));
});
println!("running");
}
#[test]
fn test_server() {
let _ = env_logger::init();
start_server();
let mut sock = BufReader::new(TcpStream::connect(HOST).unwrap());
let mut recv = String::new();
sock.get_mut().write_all(b"hello world\n").unwrap();
// assert_eq!(recv, "hello world\n");
recv.clear();
sock.get_mut().write_all(b"this is a line\n").unwrap();
// assert_eq!(recv, "this is a line\n")
}
}
|
{
match self.listener.accept() {
Ok(Some((socket, addr))) => {
info!("New Connection from {}", addr);
let token = self.connections
.insert_with(|token| Connection::new(socket, token))
.unwrap();
event_loop.register(&self.connections[token].socket,
token,
EventSet::readable(), // TODO hup?
PollOpt::edge())
.unwrap();
}
Ok(None) => {}
Err(e) => {
// TODO handle
event_loop.shutdown();
}
}
}
|
identifier_body
|
server.rs
|
use mio::{Handler, Token, EventLoop, EventSet, PollOpt};
use mio::tcp::TcpListener;
use mio::util::Slab;
use std::net::SocketAddr;
use connection::Connection;
use error::Result;
const SERVER: Token = Token(0);
struct Server {
listener: TcpListener,
connections: Slab<Connection>,
}
impl Server {
fn new(listener: TcpListener) -> Self {
let slab = Slab::new_starting_at(Token(1), 1024);
Server {
listener: listener,
connections: slab,
}
}
pub fn run(addr: SocketAddr) -> Result<()> {
let listener = try!(TcpListener::bind(&addr));
let mut event_loop = try!(EventLoop::new());
try!(event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge()));
let mut server = Self::new(listener);
event_loop.run(&mut server);
Ok(())
}
fn accept_new(&mut self, event_loop: &mut EventLoop<Server>) {
match self.listener.accept() {
Ok(Some((socket, addr))) => {
info!("New Connection from {}", addr);
let token = self.connections
.insert_with(|token| Connection::new(socket, token))
.unwrap();
event_loop.register(&self.connections[token].socket,
token,
EventSet::readable(), // TODO hup?
PollOpt::edge())
.unwrap();
}
Ok(None) => {}
Err(e) => {
// TODO handle
event_loop.shutdown();
}
}
}
}
impl Handler for Server {
type Timeout = ();
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) {
match token {
SERVER => self.accept_new(event_loop),
_ => {
if events.is_readable() {
self.connections[token].read()
}
if events.is_writable()
|
if events.is_hup() {}
if events.is_error() {}
if self.connections[token].is_closed() {
event_loop.deregister(&self.connections[token].socket);
let _ = self.connections.remove(token);
}
}
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<Server>, timeout: Self::Timeout) {}
}
#[cfg(test)]
mod test {
use std::io::{BufRead, BufReader, Read, Write};
use std::net::TcpStream;
extern crate env_logger;
const HOST: &'static str = "127.0.0.1:60254";
fn start_server() {
use std::thread;
use std::time::Duration;
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
thread::spawn(|| {
info!("running server");
super::Server::run(HOST.parse().unwrap()).unwrap();
});
thread::sleep(Duration::from_millis(1000));
});
println!("running");
}
#[test]
fn test_server() {
let _ = env_logger::init();
start_server();
let mut sock = BufReader::new(TcpStream::connect(HOST).unwrap());
let mut recv = String::new();
sock.get_mut().write_all(b"hello world\n").unwrap();
// assert_eq!(recv, "hello world\n");
recv.clear();
sock.get_mut().write_all(b"this is a line\n").unwrap();
// assert_eq!(recv, "this is a line\n")
}
}
|
{
self.connections[token].write()
}
|
conditional_block
|
server.rs
|
use mio::{Handler, Token, EventLoop, EventSet, PollOpt};
use mio::tcp::TcpListener;
use mio::util::Slab;
use std::net::SocketAddr;
use connection::Connection;
use error::Result;
const SERVER: Token = Token(0);
struct Server {
listener: TcpListener,
connections: Slab<Connection>,
}
impl Server {
fn new(listener: TcpListener) -> Self {
let slab = Slab::new_starting_at(Token(1), 1024);
Server {
listener: listener,
connections: slab,
}
}
pub fn
|
(addr: SocketAddr) -> Result<()> {
let listener = try!(TcpListener::bind(&addr));
let mut event_loop = try!(EventLoop::new());
try!(event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge()));
let mut server = Self::new(listener);
event_loop.run(&mut server);
Ok(())
}
fn accept_new(&mut self, event_loop: &mut EventLoop<Server>) {
match self.listener.accept() {
Ok(Some((socket, addr))) => {
info!("New Connection from {}", addr);
let token = self.connections
.insert_with(|token| Connection::new(socket, token))
.unwrap();
event_loop.register(&self.connections[token].socket,
token,
EventSet::readable(), // TODO hup?
PollOpt::edge())
.unwrap();
}
Ok(None) => {}
Err(e) => {
// TODO handle
event_loop.shutdown();
}
}
}
}
impl Handler for Server {
type Timeout = ();
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) {
match token {
SERVER => self.accept_new(event_loop),
_ => {
if events.is_readable() {
self.connections[token].read()
}
if events.is_writable() {
self.connections[token].write()
}
if events.is_hup() {}
if events.is_error() {}
if self.connections[token].is_closed() {
event_loop.deregister(&self.connections[token].socket);
let _ = self.connections.remove(token);
}
}
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<Server>, timeout: Self::Timeout) {}
}
#[cfg(test)]
mod test {
use std::io::{BufRead, BufReader, Read, Write};
use std::net::TcpStream;
extern crate env_logger;
const HOST: &'static str = "127.0.0.1:60254";
fn start_server() {
use std::thread;
use std::time::Duration;
use std::sync::{Once, ONCE_INIT};
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
thread::spawn(|| {
info!("running server");
super::Server::run(HOST.parse().unwrap()).unwrap();
});
thread::sleep(Duration::from_millis(1000));
});
println!("running");
}
#[test]
fn test_server() {
let _ = env_logger::init();
start_server();
let mut sock = BufReader::new(TcpStream::connect(HOST).unwrap());
let mut recv = String::new();
sock.get_mut().write_all(b"hello world\n").unwrap();
// assert_eq!(recv, "hello world\n");
recv.clear();
sock.get_mut().write_all(b"this is a line\n").unwrap();
// assert_eq!(recv, "this is a line\n")
}
}
|
run
|
identifier_name
|
Concurrent.rs
|
extern crate sync;
use sync::{Arc, Mutex};
fn main ()
|
// spawn(proc() {
// let numbers = rx.recv();
// println!("{}", numbers[num]);
// })
// }
// let numbers = ~[1,2,3];
// let numbers = Arc::new(numbers);
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
// spawn(proc() {
// let numbers = rx.recv();
// println!("{:d}", numbers[num as uint]);
// })
// }
let numbers = ~[1,2,3];
let numbers_lock = Arc::new(Mutex::new(numbers));
for num in range(0, 3) {
let (tx, rx) = channel();
tx.send(numbers_lock.clone());
spawn(proc() {
let numbers_lock = rx.recv();
let mut numbers = numbers_lock.lock();
numbers[num as uint] += 1;
println!("{}", numbers[num as uint]);
})
}
}
|
{
// let mut numbers = ~[1, 2, 3, 4];
// let (tx, rx) = channel ();
// numbers[0] = 4;
// tx.send(numbers);
// spawn (proc () {
// let mut numbers = rx.recv ();
// println! ("{}", numbers[0]);
// numbers[0] = 5;
// println! ("{}", numbers[0]);
// tx.send(numbers);
// })
// let numbers = ~[1,2,3];
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
|
identifier_body
|
Concurrent.rs
|
extern crate sync;
use sync::{Arc, Mutex};
fn main () {
// let mut numbers = ~[1, 2, 3, 4];
// let (tx, rx) = channel ();
// numbers[0] = 4;
// tx.send(numbers);
// spawn (proc () {
// let mut numbers = rx.recv ();
|
// println! ("{}", numbers[0]);
// numbers[0] = 5;
// println! ("{}", numbers[0]);
// tx.send(numbers);
// })
// let numbers = ~[1,2,3];
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
// spawn(proc() {
// let numbers = rx.recv();
// println!("{}", numbers[num]);
// })
// }
// let numbers = ~[1,2,3];
// let numbers = Arc::new(numbers);
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
// spawn(proc() {
// let numbers = rx.recv();
// println!("{:d}", numbers[num as uint]);
// })
// }
let numbers = ~[1,2,3];
let numbers_lock = Arc::new(Mutex::new(numbers));
for num in range(0, 3) {
let (tx, rx) = channel();
tx.send(numbers_lock.clone());
spawn(proc() {
let numbers_lock = rx.recv();
let mut numbers = numbers_lock.lock();
numbers[num as uint] += 1;
println!("{}", numbers[num as uint]);
})
}
}
|
random_line_split
|
|
Concurrent.rs
|
extern crate sync;
use sync::{Arc, Mutex};
fn
|
() {
// let mut numbers = ~[1, 2, 3, 4];
// let (tx, rx) = channel ();
// numbers[0] = 4;
// tx.send(numbers);
// spawn (proc () {
// let mut numbers = rx.recv ();
// println! ("{}", numbers[0]);
// numbers[0] = 5;
// println! ("{}", numbers[0]);
// tx.send(numbers);
// })
// let numbers = ~[1,2,3];
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
// spawn(proc() {
// let numbers = rx.recv();
// println!("{}", numbers[num]);
// })
// }
// let numbers = ~[1,2,3];
// let numbers = Arc::new(numbers);
// for num in range(0, 3) {
// let (tx, rx) = channel();
// tx.send(numbers.clone());
// spawn(proc() {
// let numbers = rx.recv();
// println!("{:d}", numbers[num as uint]);
// })
// }
let numbers = ~[1,2,3];
let numbers_lock = Arc::new(Mutex::new(numbers));
for num in range(0, 3) {
let (tx, rx) = channel();
tx.send(numbers_lock.clone());
spawn(proc() {
let numbers_lock = rx.recv();
let mut numbers = numbers_lock.lock();
numbers[num as uint] += 1;
println!("{}", numbers[num as uint]);
})
}
}
|
main
|
identifier_name
|
cksum.rs
|
#![crate_name = "uu_cksum"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[macro_use]
extern crate uucore;
use std::fs::File;
use std::io::{self, stdin, BufReader, Read};
#[cfg(not(windows))]
use std::mem;
use std::path::Path;
include!(concat!(env!("OUT_DIR"), "/crc_table.rs"));
static SYNTAX: &'static str = "[OPTIONS] [FILE]...";
static SUMMARY: &'static str = "Print CRC and size for each file";
static LONG_HELP: &'static str = "";
#[inline]
fn crc_update(crc: u32, input: u8) -> u32 {
(crc << 8) ^ CRC_TABLE[((crc >> 24) as usize ^ input as usize) & 0xFF]
}
#[inline]
fn crc_final(mut crc: u32, mut length: usize) -> u32 {
while length!= 0 {
crc = crc_update(crc, length as u8);
length >>= 8;
}
!crc
}
#[cfg(windows)]
fn init_byte_array() -> Vec<u8> {
vec![0; 1024 * 1024]
}
#[cfg(not(windows))]
fn
|
() -> [u8; 1024 * 1024] {
unsafe { mem::uninitialized() }
}
#[inline]
fn cksum(fname: &str) -> io::Result<(u32, usize)> {
let mut crc = 0u32;
let mut size = 0usize;
let file;
let mut rd: Box<Read> = match fname {
"-" => Box::new(stdin()),
_ => {
file = try!(File::open(&Path::new(fname)));
Box::new(BufReader::new(file))
}
};
let mut bytes = init_byte_array();
loop {
match rd.read(&mut bytes) {
Ok(num_bytes) => {
if num_bytes == 0 {
return Ok((crc_final(crc, size), size));
}
for &b in bytes[..num_bytes].iter() {
crc = crc_update(crc, b);
}
size += num_bytes;
}
Err(err) => return Err(err),
}
}
//Ok((0 as u32,0 as usize))
}
pub fn uumain(args: Vec<String>) -> i32 {
let matches = new_coreopts!(SYNTAX, SUMMARY, LONG_HELP).parse(args);
let files = matches.free;
if files.is_empty() {
match cksum("-") {
Ok((crc, size)) => println!("{} {}", crc, size),
Err(err) => {
show_error!("{}", err);
return 2;
}
}
return 0;
}
let mut exit_code = 0;
for fname in &files {
match cksum(fname.as_ref()) {
Ok((crc, size)) => println!("{} {} {}", crc, size, fname),
Err(err) => {
show_error!("'{}' {}", fname, err);
exit_code = 2;
}
}
}
exit_code
}
|
init_byte_array
|
identifier_name
|
cksum.rs
|
#![crate_name = "uu_cksum"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[macro_use]
extern crate uucore;
use std::fs::File;
use std::io::{self, stdin, BufReader, Read};
#[cfg(not(windows))]
use std::mem;
use std::path::Path;
include!(concat!(env!("OUT_DIR"), "/crc_table.rs"));
static SYNTAX: &'static str = "[OPTIONS] [FILE]...";
static SUMMARY: &'static str = "Print CRC and size for each file";
static LONG_HELP: &'static str = "";
#[inline]
fn crc_update(crc: u32, input: u8) -> u32 {
(crc << 8) ^ CRC_TABLE[((crc >> 24) as usize ^ input as usize) & 0xFF]
}
#[inline]
fn crc_final(mut crc: u32, mut length: usize) -> u32 {
while length!= 0 {
crc = crc_update(crc, length as u8);
length >>= 8;
}
!crc
}
#[cfg(windows)]
fn init_byte_array() -> Vec<u8>
|
#[cfg(not(windows))]
fn init_byte_array() -> [u8; 1024 * 1024] {
unsafe { mem::uninitialized() }
}
#[inline]
fn cksum(fname: &str) -> io::Result<(u32, usize)> {
let mut crc = 0u32;
let mut size = 0usize;
let file;
let mut rd: Box<Read> = match fname {
"-" => Box::new(stdin()),
_ => {
file = try!(File::open(&Path::new(fname)));
Box::new(BufReader::new(file))
}
};
let mut bytes = init_byte_array();
loop {
match rd.read(&mut bytes) {
Ok(num_bytes) => {
if num_bytes == 0 {
return Ok((crc_final(crc, size), size));
}
for &b in bytes[..num_bytes].iter() {
crc = crc_update(crc, b);
}
size += num_bytes;
}
Err(err) => return Err(err),
}
}
//Ok((0 as u32,0 as usize))
}
pub fn uumain(args: Vec<String>) -> i32 {
let matches = new_coreopts!(SYNTAX, SUMMARY, LONG_HELP).parse(args);
let files = matches.free;
if files.is_empty() {
match cksum("-") {
Ok((crc, size)) => println!("{} {}", crc, size),
Err(err) => {
show_error!("{}", err);
return 2;
}
}
return 0;
}
let mut exit_code = 0;
for fname in &files {
match cksum(fname.as_ref()) {
Ok((crc, size)) => println!("{} {} {}", crc, size, fname),
Err(err) => {
show_error!("'{}' {}", fname, err);
exit_code = 2;
}
}
}
exit_code
}
|
{
vec![0; 1024 * 1024]
}
|
identifier_body
|
cksum.rs
|
#![crate_name = "uu_cksum"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[macro_use]
extern crate uucore;
use std::fs::File;
use std::io::{self, stdin, BufReader, Read};
#[cfg(not(windows))]
use std::mem;
use std::path::Path;
include!(concat!(env!("OUT_DIR"), "/crc_table.rs"));
static SYNTAX: &'static str = "[OPTIONS] [FILE]...";
static SUMMARY: &'static str = "Print CRC and size for each file";
static LONG_HELP: &'static str = "";
#[inline]
fn crc_update(crc: u32, input: u8) -> u32 {
(crc << 8) ^ CRC_TABLE[((crc >> 24) as usize ^ input as usize) & 0xFF]
}
#[inline]
fn crc_final(mut crc: u32, mut length: usize) -> u32 {
while length!= 0 {
crc = crc_update(crc, length as u8);
length >>= 8;
}
!crc
}
#[cfg(windows)]
fn init_byte_array() -> Vec<u8> {
vec![0; 1024 * 1024]
}
#[cfg(not(windows))]
fn init_byte_array() -> [u8; 1024 * 1024] {
unsafe { mem::uninitialized() }
}
#[inline]
fn cksum(fname: &str) -> io::Result<(u32, usize)> {
let mut crc = 0u32;
let mut size = 0usize;
|
let mut rd: Box<Read> = match fname {
"-" => Box::new(stdin()),
_ => {
file = try!(File::open(&Path::new(fname)));
Box::new(BufReader::new(file))
}
};
let mut bytes = init_byte_array();
loop {
match rd.read(&mut bytes) {
Ok(num_bytes) => {
if num_bytes == 0 {
return Ok((crc_final(crc, size), size));
}
for &b in bytes[..num_bytes].iter() {
crc = crc_update(crc, b);
}
size += num_bytes;
}
Err(err) => return Err(err),
}
}
//Ok((0 as u32,0 as usize))
}
pub fn uumain(args: Vec<String>) -> i32 {
let matches = new_coreopts!(SYNTAX, SUMMARY, LONG_HELP).parse(args);
let files = matches.free;
if files.is_empty() {
match cksum("-") {
Ok((crc, size)) => println!("{} {}", crc, size),
Err(err) => {
show_error!("{}", err);
return 2;
}
}
return 0;
}
let mut exit_code = 0;
for fname in &files {
match cksum(fname.as_ref()) {
Ok((crc, size)) => println!("{} {} {}", crc, size, fname),
Err(err) => {
show_error!("'{}' {}", fname, err);
exit_code = 2;
}
}
}
exit_code
}
|
let file;
|
random_line_split
|
traversal.rs
|
let parent = node.traversal_parent().unwrap();
let remaining = parent.did_process_child();
if remaining!= 0 {
// The parent has other unprocessed descendants. We only
// perform postorder processing after the last descendant
// has been processed.
break
}
node = parent.as_node();
}
} else {
// Otherwise record the number of children to process when the time
// comes.
node.as_element().unwrap()
.store_children_to_process(children_to_process);
}
}
/// Style invalidations happen when traversing from a parent to its children.
/// However, this mechanism can't handle style invalidations on the root. As
/// such, we have a pre-traversal step to handle that part and determine whether
/// a full traversal is needed.
fn pre_traverse(
root: E,
shared_context: &SharedStyleContext,
traversal_flags: TraversalFlags
) -> PreTraverseToken {
// If this is an unstyled-only traversal, the caller has already verified
// that there's something to traverse, and we don't need to do any
// invalidation since we're not doing any restyling.
if traversal_flags.contains(traversal_flags::UnstyledOnly) {
return PreTraverseToken(true)
}
let flags = shared_context.traversal_flags;
let mut data = root.mutate_data();
let mut data = data.as_mut().map(|d| &mut **d);
if let Some(ref mut data) = data {
// Invalidate our style, and the one of our siblings and descendants
// as needed.
data.invalidate_style_if_needed(root, shared_context);
};
let parent = root.traversal_parent();
let parent_data = parent.as_ref().and_then(|p| p.borrow_data());
let should_traverse = Self::element_needs_traversal(
root,
flags,
data.map(|d| &*d),
parent_data.as_ref().map(|d| &**d)
);
PreTraverseToken(should_traverse)
}
/// Returns true if traversal should visit a text node. The style system
/// never processes text nodes, but Servo overrides this to visit them for
/// flow construction when necessary.
fn text_node_needs_traversal(node: E::ConcreteNode, _parent_data: &ElementData) -> bool {
debug_assert!(node.is_text_node());
false
}
/// Returns true if traversal is needed for the given element and subtree.
///
/// The caller passes |parent_data|, which is only null if there is no
/// parent.
fn element_needs_traversal(
el: E,
traversal_flags: TraversalFlags,
data: Option<&ElementData>,
parent_data: Option<&ElementData>,
) -> bool {
debug!("element_needs_traversal({:?}, {:?}, {:?}, {:?})",
el, traversal_flags, data, parent_data);
if traversal_flags.contains(traversal_flags::UnstyledOnly) {
return data.map_or(true, |d|!d.has_styles()) || el.has_dirty_descendants();
}
// In case of animation-only traversal we need to traverse the element
// if the element has animation only dirty descendants bit,
// animation-only restyle hint or recascade.
if traversal_flags.for_animation_only() {
return data.map_or(false, |d| d.has_styles()) &&
(el.has_animation_only_dirty_descendants() ||
data.as_ref().unwrap().restyle.hint.has_animation_hint_or_recascade());
}
// Non-incremental layout visits every node.
if is_servo_nonincremental_layout() {
return true;
}
// Unwrap the data.
let data = match data {
Some(d) if d.has_styles() => d,
_ => return true,
};
// If the element is native-anonymous and an ancestor frame will be
// reconstructed, the child and all its descendants will be destroyed.
// In that case, we wouldn't need to traverse the subtree...
//
// Except if there could be transitions of pseudo-elements, in which
// case we still need to process them, unfortunately.
//
// We need to conservatively continue the traversal to style the
// pseudo-element in order to properly process potentially-new
// transitions that we won't see otherwise.
//
// But it may be that we no longer match, so detect that case and act
// appropriately here.
if el.is_native_anonymous() {
if let Some(parent_data) = parent_data {
let going_to_reframe =
parent_data.restyle.reconstructed_self_or_ancestor();
let mut is_before_or_after_pseudo = false;
if let Some(pseudo) = el.implemented_pseudo_element() {
if pseudo.is_before_or_after() {
is_before_or_after_pseudo = true;
let still_match =
parent_data.styles.pseudos.get(&pseudo).is_some();
if!still_match {
debug_assert!(going_to_reframe,
"We're removing a pseudo, so we \
should reframe!");
return false;
}
}
}
if going_to_reframe &&!is_before_or_after_pseudo {
debug!("Element {:?} is in doomed NAC subtree, \
culling traversal", el);
return false;
}
}
}
// If the dirty descendants bit is set, we need to traverse no matter
// what. Skip examining the ElementData.
if el.has_dirty_descendants() {
return true;
}
// If we have a restyle hint or need to recascade, we need to visit the
// element.
//
// Note that this is different than checking has_current_styles_for_traversal(),
// since that can return true even if we have a restyle hint indicating
// that the element's descendants (but not necessarily the element) need
// restyling.
if!data.restyle.hint.is_empty() {
return true;
}
// Servo uses the post-order traversal for flow construction, so we need
// to traverse any element with damage so that we can perform fixup /
// reconstruction on our way back up the tree.
//
// In aggressively forgetful traversals (where we seek out and clear damage
// in addition to not computing it) we also need to traverse nodes with
// explicit damage and no other restyle data, so that this damage can be cleared.
if (cfg!(feature = "servo") ||
traversal_flags.contains(traversal_flags::AggressivelyForgetful)) &&
!data.restyle.damage.is_empty() {
return true;
}
trace!("{:?} doesn't need traversal", el);
false
}
/// Returns true if we want to cull this subtree from the travesal.
fn should_cull_subtree(
&self,
context: &mut StyleContext<E>,
parent: E,
parent_data: &ElementData,
) -> bool {
debug_assert!(cfg!(feature = "gecko") ||
parent.has_current_styles_for_traversal(parent_data, context.shared.traversal_flags));
// If the parent computed display:none, we don't style the subtree.
if parent_data.styles.is_display_none() {
debug!("Parent {:?} is display:none, culling traversal", parent);
return true;
}
// Gecko-only XBL handling.
//
// If we're computing initial styles and the parent has a Gecko XBL
// binding, that binding may inject anonymous children and remap the
// explicit children to an insertion point (or hide them entirely). It
// may also specify a scoped stylesheet, which changes the rules that
// apply within the subtree. These two effects can invalidate the result
// of property inheritance and selector matching (respectively) within
// the subtree.
//
// To avoid wasting work, we defer initial styling of XBL subtrees until
// frame construction, which does an explicit traversal of the unstyled
// children after shuffling the subtree. That explicit traversal may in
// turn find other bound elements, which get handled in the same way.
//
// We explicitly avoid handling restyles here (explicitly removing or
// changing bindings), since that adds complexity and is rarer. If it
// happens, we may just end up doing wasted work, since Gecko
// recursively drops Servo ElementData when the XBL insertion parent of
// an Element is changed.
if cfg!(feature = "gecko") && context.thread_local.is_initial_style() &&
parent_data.styles.primary().has_moz_binding()
{
debug!("Parent {:?} has XBL binding, deferring traversal", parent);
return true;
}
return false;
}
/// Return the shared style context common to all worker threads.
fn shared_context(&self) -> &SharedStyleContext;
/// Whether we're performing a parallel traversal.
///
/// NB: We do this check on runtime. We could guarantee correctness in this
/// regard via the type system via a `TraversalDriver` trait for this trait,
/// that could be one of two concrete types. It's not clear whether the
/// potential code size impact of that is worth it.
fn is_parallel(&self) -> bool;
}
/// Manually resolve style by sequentially walking up the parent chain to the
/// first styled Element, ignoring pending restyles. The resolved style is made
/// available via a callback, and can be dropped by the time this function
/// returns in the display:none subtree case.
pub fn resolve_style<E>(
    context: &mut StyleContext<E>,
    element: E,
    rule_inclusion: RuleInclusion,
    ignore_existing_style: bool,
) -> ElementStyles
where
    E: TElement,
{
    use style_resolver::StyleResolverForElement;

    // This entry point is only expected to be used when we restrict ourselves
    // to default styles, deliberately ignore existing styles, or the element
    // has no styles yet.
    debug_assert!(rule_inclusion == RuleInclusion::DefaultOnly ||
                  ignore_existing_style ||
                  element.borrow_data().map_or(true, |d| !d.has_styles()),
                  "Why are we here?");
    let mut ancestors_requiring_style_resolution = SmallVec::<[E; 16]>::new();

    // Clear the bloom filter, just in case the caller is reusing TLS.
    context.thread_local.bloom_filter.clear();

    // Walk up the traversal-parent chain, collecting unstyled ancestors until
    // we find one whose primary style we're allowed to reuse (only when
    // matching all rules and not ignoring existing styles).
    let mut style = None;
    let mut ancestor = element.traversal_parent();
    while let Some(current) = ancestor {
        if rule_inclusion == RuleInclusion::All && !ignore_existing_style {
            if let Some(data) = current.borrow_data() {
                if let Some(ancestor_style) = data.styles.get_primary() {
                    style = Some(ancestor_style.clone());
                    break;
                }
            }
        }
        ancestors_requiring_style_resolution.push(current);
        ancestor = current.traversal_parent();
    }

    // Seed the bloom filter from the first styled ancestor (if any); the
    // unstyled ancestors are pushed one at a time as we resolve them below.
    if let Some(ancestor) = ancestor {
        context.thread_local.bloom_filter.rebuild(ancestor);
        context.thread_local.bloom_filter.push(ancestor);
    }

    // Compute the layout parent style by skipping over display:contents
    // ancestors, which don't generate a box.
    //
    // NOTE(review): the unwrap() below assumes `ancestor` is Some whenever
    // the candidate style is display:contents — TODO confirm a
    // display:contents style can't be reached here without a styled ancestor.
    let mut layout_parent_style = style.clone();
    while let Some(style) = layout_parent_style.take() {
        if !style.is_display_contents() {
            layout_parent_style = Some(style);
            break;
        }
        ancestor = ancestor.unwrap().traversal_parent();
        layout_parent_style = ancestor.map(|a| {
            a.borrow_data().unwrap().styles.primary().clone()
        });
    }

    // Resolve the collected ancestors root-most first, threading `style` /
    // `layout_parent_style` through as the inherited inputs for each step and
    // feeding the bloom filter as we descend.
    for ancestor in ancestors_requiring_style_resolution.iter().rev() {
        context.thread_local.bloom_filter.assert_complete(*ancestor);
        let primary_style =
            StyleResolverForElement::new(*ancestor, context, rule_inclusion)
                .resolve_primary_style(
                    style.as_ref().map(|s| &**s),
                    layout_parent_style.as_ref().map(|s| &**s)
                );

        let is_display_contents = primary_style.style.is_display_contents();

        style = Some(primary_style.style);
        if !is_display_contents {
            layout_parent_style = style.clone();
        }

        context.thread_local.bloom_filter.push(*ancestor);
    }

    // Finally, resolve the style of the element itself against the computed
    // inherited inputs.
    context.thread_local.bloom_filter.assert_complete(element);
    StyleResolverForElement::new(element, context, rule_inclusion)
        .resolve_style(
            style.as_ref().map(|s| &**s),
            layout_parent_style.as_ref().map(|s| &**s)
        )
}
/// Calculates the style for a single node.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<E, D, F>(
traversal: &D,
traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>,
element: E,
data: &mut ElementData,
note_child: F,
)
where
E: TElement,
D: DomTraversal<E>,
F: FnMut(E::ConcreteNode),
{
use traversal_flags::*;
let flags = context.shared.traversal_flags;
context.thread_local.begin_element(element, data);
context.thread_local.statistics.elements_traversed += 1;
debug_assert!(flags.intersects(AnimationOnly | UnstyledOnly) ||
!element.has_snapshot() || element.handled_snapshot(),
"Should've handled snapshots here already");
let compute_self =!element.has_current_styles_for_traversal(data, flags);
let mut hint = RestyleHint::empty();
debug!("recalc_style_at: {:?} (compute_self={:?}, \
dirty_descendants={:?}, data={:?})",
element, compute_self, element.has_dirty_descendants(), data);
// Compute style for this element if necessary.
if compute_self {
match compute_style(traversal_data, context, element, data) {
ChildCascadeRequirement::MustCascadeChildren => {
hint |= RECASCADE_SELF;
}
ChildCascadeRequirement::MustCascadeDescendants => {
hint |= RECASCADE_SELF | RECASCADE_DESCENDANTS;
}
ChildCascadeRequirement::CanSkipCascade => {}
};
// We must always cascade native anonymous subtrees, since they inherit
// styles from their first non-NAC ancestor.
if element.is_native_anonymous() {
hint |= RECASCADE_SELF;
}
// If we're restyling this element to display:none, throw away all style
// data in the subtree, notify the caller to early-return.
if data.styles.is_display_none() {
debug!("{:?} style is display:none - clearing data from descendants.",
element);
clear_descendant_data(element)
}
// Inform any paint worklets of changed style, to speculatively
// evaluate the worklet code. In the case that the size hasn't changed,
// this will result in increased concurrency between script and layout.
notify_paint_worklet(context, data);
} else {
debug_assert!(data.has_styles());
data.restyle.set_traversed_without_styling();
}
// Now that matching and cascading is done, clear the bits corresponding to
// those operations and compute the propagated restyle hint (unless we're
// not processing invalidations, in which case don't need to propagate it
// and must avoid clearing it).
let mut propagated_hint = if flags.contains(UnstyledOnly) {
RestyleHint::empty()
} else {
debug_assert!(flags.for_animation_only() ||
!data.restyle.hint.has_animation_hint(),
"animation restyle hint should be handled during \
animation-only restyles");
data.restyle.hint.propagate(&flags)
};
// FIXME(bholley): Need to handle explicitly-inherited reset properties
// somewhere.
propagated_hint.insert(hint);
trace!("propagated_hint={:?} \
is_display_none={:?}, implementing_pseudo={:?}",
propagated_hint,
data.styles.is_display_none(),
element.implemented_pseudo_element());
debug_assert!(element.has_current_styles_for_traversal(data, flags),
"Should have computed style or haven't yet valid computed \
style in case of animation-only restyle");
let has_dirty_descendants_for_this_restyle =
if flags.for_animation_only() {
element.has_animation_only_dirty_descendants()
} else {
element.has_dirty_descendants()
};
// Before examining each child individually, try to prove that our children
// don't need style processing. They need processing if any of the following
// conditions hold:
// * We have the dirty descendants bit.
// * We're propagating a hint.
// * This is the initial style.
// * We generated a reconstruct hint on self (which could mean that we
// switched from display:none to something else, which means the children
// need initial styling).
// * This is a servo non-incremental traversal.
//
// Additionally, there are a few scenarios where we avoid traversing the
// subtree even if descendant styles are out of date. These cases are
// enumerated in should_cull_subtree().
let mut traverse_children = has_dirty_descendants_for_this_restyle ||
!propagated_hint.is_empty() ||
context.thread_local.is_initial_style() ||
data.restyle.reconstructed_self() ||
is_servo_nonincremental_layout();
traverse_children = traverse_children &&
!traversal.should_cull_subtree(context, element, &data);
// Examine our children, and enqueue the appropriate ones for traversal.
if traverse_children
|
// If we are in a forgetful traversal, drop the existing restyle
// data here, since we won't need to perform a post-traversal to pick up
// any change hints.
if flags.contains(Forgetful) {
data.clear_restyle_flags_and_damage();
}
// Optionally clear the descendants bit for the traversal type we're in.
if flags.for_animation_only() {
if flags.contains(ClearAnimationOnlyDirtyDescendants) {
unsafe { element.unset_animation_only_dirty_descendants(); }
}
} else {
// There are two cases when we want to clear the dity descendants bit here
// after styling this element. The first case is when we were explicitly
// asked to clear the bit by the caller.
//
// The second case is when this element is the root of a display:none
// subtree, even if the style didn't change (since, if the style did change,
// we'd have already cleared it above).
//
// This keeps the tree in a valid state without requiring the DOM to check
// display:none on the parent when inserting new children (which can be
// moderately expensive). Instead, DOM implementations can unconditionally
// set the dirty descendants bit on any styled parent, and let the traversal
// sort it out.
if flags.contains(ClearDirtyDescendants) ||
data.styles.is_display_none() {
unsafe { element.unset_dirty_descendants(); }
}
}
context.thread_local.end_element(element);
}
fn compute_style<E>(
traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>,
element: E,
data: &mut ElementData
) -> ChildCascadeRequirement
where
E: TElement,
{
use data::RestyleKind::*;
use sharing::StyleSharingResult::*;
context.thread_local.statistics.elements_styled += 1;
let kind = data.restyle
|
{
note_children::<E, D, F>(
context,
element,
data,
propagated_hint,
data.restyle.reconstructed_self_or_ancestor(),
note_child
);
}
|
conditional_block
|
traversal.rs
|
mut node: E::ConcreteNode,
children_to_process: isize
) {
// If the postorder step is a no-op, don't bother.
if!Self::needs_postorder_traversal() {
return;
}
if children_to_process == 0 {
// We are a leaf. Walk up the chain.
loop {
self.process_postorder(context, node);
if node.opaque() == root {
break;
}
let parent = node.traversal_parent().unwrap();
let remaining = parent.did_process_child();
if remaining!= 0 {
// The parent has other unprocessed descendants. We only
// perform postorder processing after the last descendant
// has been processed.
break
}
node = parent.as_node();
}
} else {
// Otherwise record the number of children to process when the time
// comes.
node.as_element().unwrap()
.store_children_to_process(children_to_process);
}
}
/// Style invalidations happen when traversing from a parent to its children.
/// However, this mechanism can't handle style invalidations on the root. As
/// such, we have a pre-traversal step to handle that part and determine whether
/// a full traversal is needed.
fn pre_traverse(
    root: E,
    shared_context: &SharedStyleContext,
    traversal_flags: TraversalFlags
) -> PreTraverseToken {
    // If this is an unstyled-only traversal, the caller has already verified
    // that there's something to traverse, and we don't need to do any
    // invalidation since we're not doing any restyling.
    if traversal_flags.contains(traversal_flags::UnstyledOnly) {
        return PreTraverseToken(true)
    }

    let flags = shared_context.traversal_flags;
    // Borrow the root's data mutably; the second binding re-borrows it as a
    // plain `&mut` so it can later be downgraded to a shared reference for
    // element_needs_traversal below.
    let mut data = root.mutate_data();
    let mut data = data.as_mut().map(|d| &mut **d);

    if let Some(ref mut data) = data {
        // Invalidate our style, and the one of our siblings and descendants
        // as needed.
        data.invalidate_style_if_needed(root, shared_context);
    };

    let parent = root.traversal_parent();
    let parent_data = parent.as_ref().and_then(|p| p.borrow_data());
    let should_traverse = Self::element_needs_traversal(
        root,
        flags,
        data.map(|d| &*d),
        parent_data.as_ref().map(|d| &**d)
    );

    PreTraverseToken(should_traverse)
}
/// Returns true if traversal should visit a text node. The style system
/// never processes text nodes, but Servo overrides this to visit them for
/// flow construction when necessary.
fn text_node_needs_traversal(node: E::ConcreteNode, _parent_data: &ElementData) -> bool {
    debug_assert!(node.is_text_node());
    // Default implementation: the style system itself never needs to visit
    // text nodes.
    false
}
/// Returns true if traversal is needed for the given element and subtree.
///
/// The caller passes |parent_data|, which is only null if there is no
/// parent.
fn element_needs_traversal(
    el: E,
    traversal_flags: TraversalFlags,
    data: Option<&ElementData>,
    parent_data: Option<&ElementData>,
) -> bool {
    debug!("element_needs_traversal({:?}, {:?}, {:?}, {:?})",
           el, traversal_flags, data, parent_data);

    // Unstyled-only traversals visit anything that lacks styles, or anything
    // whose dirty-descendants bit may hide unstyled descendants.
    if traversal_flags.contains(traversal_flags::UnstyledOnly) {
        return data.map_or(true, |d| !d.has_styles()) || el.has_dirty_descendants();
    }

    // In case of animation-only traversal we need to traverse the element
    // if the element has animation only dirty descendants bit,
    // animation-only restyle hint or recascade.
    if traversal_flags.for_animation_only() {
        // The has_styles() check short-circuits, so the unwrap() below can't
        // fail.
        return data.map_or(false, |d| d.has_styles()) &&
               (el.has_animation_only_dirty_descendants() ||
                data.as_ref().unwrap().restyle.hint.has_animation_hint_or_recascade());
    }

    // Non-incremental layout visits every node.
    if is_servo_nonincremental_layout() {
        return true;
    }

    // Unwrap the data; elements without up-to-date styles always need a
    // traversal.
    let data = match data {
        Some(d) if d.has_styles() => d,
        _ => return true,
    };

    // If the element is native-anonymous and an ancestor frame will be
    // reconstructed, the child and all its descendants will be destroyed.
    // In that case, we wouldn't need to traverse the subtree...
    //
    // Except if there could be transitions of pseudo-elements, in which
    // case we still need to process them, unfortunately.
    //
    // We need to conservatively continue the traversal to style the
    // pseudo-element in order to properly process potentially-new
    // transitions that we won't see otherwise.
    //
    // But it may be that we no longer match, so detect that case and act
    // appropriately here.
    if el.is_native_anonymous() {
        if let Some(parent_data) = parent_data {
            let going_to_reframe =
                parent_data.restyle.reconstructed_self_or_ancestor();

            let mut is_before_or_after_pseudo = false;
            if let Some(pseudo) = el.implemented_pseudo_element() {
                if pseudo.is_before_or_after() {
                    is_before_or_after_pseudo = true;
                    let still_match =
                        parent_data.styles.pseudos.get(&pseudo).is_some();

                    if !still_match {
                        debug_assert!(going_to_reframe,
                                      "We're removing a pseudo, so we \
                                       should reframe!");
                        return false;
                    }
                }
            }

            if going_to_reframe && !is_before_or_after_pseudo {
                debug!("Element {:?} is in doomed NAC subtree, \
                        culling traversal", el);
                return false;
            }
        }
    }

    // If the dirty descendants bit is set, we need to traverse no matter
    // what. Skip examining the ElementData.
    if el.has_dirty_descendants() {
        return true;
    }

    // If we have a restyle hint or need to recascade, we need to visit the
    // element.
    //
    // Note that this is different than checking has_current_styles_for_traversal(),
    // since that can return true even if we have a restyle hint indicating
    // that the element's descendants (but not necessarily the element) need
    // restyling.
    if !data.restyle.hint.is_empty() {
        return true;
    }

    // Servo uses the post-order traversal for flow construction, so we need
    // to traverse any element with damage so that we can perform fixup /
    // reconstruction on our way back up the tree.
    //
    // In aggressively forgetful traversals (where we seek out and clear damage
    // in addition to not computing it) we also need to traverse nodes with
    // explicit damage and no other restyle data, so that this damage can be cleared.
    if (cfg!(feature = "servo") ||
        traversal_flags.contains(traversal_flags::AggressivelyForgetful)) &&
       !data.restyle.damage.is_empty() {
        return true;
    }

    trace!("{:?} doesn't need traversal", el);
    false
}
/// Returns true if we want to cull this subtree from the traversal.
fn should_cull_subtree(
    &self,
    context: &mut StyleContext<E>,
    parent: E,
    parent_data: &ElementData,
) -> bool {
    // On non-Gecko builds the parent must already have current styles by the
    // time we consider culling its subtree.
    debug_assert!(cfg!(feature = "gecko") ||
                  parent.has_current_styles_for_traversal(parent_data, context.shared.traversal_flags));

    // If the parent computed display:none, we don't style the subtree.
    if parent_data.styles.is_display_none() {
        debug!("Parent {:?} is display:none, culling traversal", parent);
        return true;
    }

    // Gecko-only XBL handling.
    //
    // If we're computing initial styles and the parent has a Gecko XBL
    // binding, that binding may inject anonymous children and remap the
    // explicit children to an insertion point (or hide them entirely). It
    // may also specify a scoped stylesheet, which changes the rules that
    // apply within the subtree. These two effects can invalidate the result
    // of property inheritance and selector matching (respectively) within
    // the subtree.
    //
    // To avoid wasting work, we defer initial styling of XBL subtrees until
    // frame construction, which does an explicit traversal of the unstyled
    // children after shuffling the subtree. That explicit traversal may in
    // turn find other bound elements, which get handled in the same way.
    //
    // We explicitly avoid handling restyles here (explicitly removing or
    // changing bindings), since that adds complexity and is rarer. If it
    // happens, we may just end up doing wasted work, since Gecko
    // recursively drops Servo ElementData when the XBL insertion parent of
    // an Element is changed.
    if cfg!(feature = "gecko") && context.thread_local.is_initial_style() &&
       parent_data.styles.primary().has_moz_binding()
    {
        debug!("Parent {:?} has XBL binding, deferring traversal", parent);
        return true;
    }

    return false;
}
/// Return the shared style context common to all worker threads.
fn shared_context(&self) -> &SharedStyleContext;

/// Whether we're performing a parallel traversal.
///
/// NB: We do this check at runtime. We could guarantee correctness in this
/// regard via the type system via a `TraversalDriver` trait for this trait,
/// that could be one of two concrete types. It's not clear whether the
/// potential code size impact of that is worth it.
fn is_parallel(&self) -> bool;
}
/// Manually resolve style by sequentially walking up the parent chain to the
/// first styled Element, ignoring pending restyles. The resolved style is made
/// available via a callback, and can be dropped by the time this function
/// returns in the display:none subtree case.
pub fn resolve_style<E>(
    context: &mut StyleContext<E>,
    element: E,
    rule_inclusion: RuleInclusion,
    ignore_existing_style: bool,
) -> ElementStyles
where
    E: TElement,
{
    use style_resolver::StyleResolverForElement;

    // This entry point is only expected to be used when we restrict ourselves
    // to default styles, deliberately ignore existing styles, or the element
    // has no styles yet.
    debug_assert!(rule_inclusion == RuleInclusion::DefaultOnly ||
                  ignore_existing_style ||
                  element.borrow_data().map_or(true, |d| !d.has_styles()),
                  "Why are we here?");
    let mut ancestors_requiring_style_resolution = SmallVec::<[E; 16]>::new();

    // Clear the bloom filter, just in case the caller is reusing TLS.
    context.thread_local.bloom_filter.clear();

    // Walk up the traversal-parent chain, collecting unstyled ancestors until
    // we find one whose primary style we're allowed to reuse (only when
    // matching all rules and not ignoring existing styles).
    let mut style = None;
    let mut ancestor = element.traversal_parent();
    while let Some(current) = ancestor {
        if rule_inclusion == RuleInclusion::All && !ignore_existing_style {
            if let Some(data) = current.borrow_data() {
                if let Some(ancestor_style) = data.styles.get_primary() {
                    style = Some(ancestor_style.clone());
                    break;
                }
            }
        }
        ancestors_requiring_style_resolution.push(current);
        ancestor = current.traversal_parent();
    }

    // Seed the bloom filter from the first styled ancestor (if any); the
    // unstyled ancestors are pushed one at a time as we resolve them below.
    if let Some(ancestor) = ancestor {
        context.thread_local.bloom_filter.rebuild(ancestor);
        context.thread_local.bloom_filter.push(ancestor);
    }

    // Compute the layout parent style by skipping over display:contents
    // ancestors, which don't generate a box.
    //
    // NOTE(review): the unwrap() below assumes `ancestor` is Some whenever
    // the candidate style is display:contents — TODO confirm a
    // display:contents style can't be reached here without a styled ancestor.
    let mut layout_parent_style = style.clone();
    while let Some(style) = layout_parent_style.take() {
        if !style.is_display_contents() {
            layout_parent_style = Some(style);
            break;
        }
        ancestor = ancestor.unwrap().traversal_parent();
        layout_parent_style = ancestor.map(|a| {
            a.borrow_data().unwrap().styles.primary().clone()
        });
    }

    // Resolve the collected ancestors root-most first, threading `style` /
    // `layout_parent_style` through as the inherited inputs for each step and
    // feeding the bloom filter as we descend.
    for ancestor in ancestors_requiring_style_resolution.iter().rev() {
        context.thread_local.bloom_filter.assert_complete(*ancestor);
        let primary_style =
            StyleResolverForElement::new(*ancestor, context, rule_inclusion)
                .resolve_primary_style(
                    style.as_ref().map(|s| &**s),
                    layout_parent_style.as_ref().map(|s| &**s)
                );

        let is_display_contents = primary_style.style.is_display_contents();

        style = Some(primary_style.style);
        if !is_display_contents {
            layout_parent_style = style.clone();
        }

        context.thread_local.bloom_filter.push(*ancestor);
    }

    // Finally, resolve the style of the element itself against the computed
    // inherited inputs.
    context.thread_local.bloom_filter.assert_complete(element);
    StyleResolverForElement::new(element, context, rule_inclusion)
        .resolve_style(
            style.as_ref().map(|s| &**s),
            layout_parent_style.as_ref().map(|s| &**s)
        )
}
/// Calculates the style for a single node.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<E, D, F>(
traversal: &D,
traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>,
element: E,
data: &mut ElementData,
note_child: F,
)
where
E: TElement,
D: DomTraversal<E>,
F: FnMut(E::ConcreteNode),
{
use traversal_flags::*;
let flags = context.shared.traversal_flags;
context.thread_local.begin_element(element, data);
context.thread_local.statistics.elements_traversed += 1;
debug_assert!(flags.intersects(AnimationOnly | UnstyledOnly) ||
!element.has_snapshot() || element.handled_snapshot(),
"Should've handled snapshots here already");
let compute_self =!element.has_current_styles_for_traversal(data, flags);
let mut hint = RestyleHint::empty();
debug!("recalc_style_at: {:?} (compute_self={:?}, \
dirty_descendants={:?}, data={:?})",
element, compute_self, element.has_dirty_descendants(), data);
// Compute style for this element if necessary.
if compute_self {
match compute_style(traversal_data, context, element, data) {
ChildCascadeRequirement::MustCascadeChildren => {
hint |= RECASCADE_SELF;
}
ChildCascadeRequirement::MustCascadeDescendants => {
hint |= RECASCADE_SELF | RECASCADE_DESCENDANTS;
}
ChildCascadeRequirement::CanSkipCascade => {}
};
// We must always cascade native anonymous subtrees, since they inherit
// styles from their first non-NAC ancestor.
if element.is_native_anonymous() {
hint |= RECASCADE_SELF;
}
// If we're restyling this element to display:none, throw away all style
// data in the subtree, notify the caller to early-return.
if data.styles.is_display_none() {
debug!("{:?} style is display:none - clearing data from descendants.",
element);
clear_descendant_data(element)
}
// Inform any paint worklets of changed style, to speculatively
// evaluate the worklet code. In the case that the size hasn't changed,
// this will result in increased concurrency between script and layout.
notify_paint_worklet(context, data);
} else {
debug_assert!(data.has_styles());
data.restyle.set_traversed_without_styling();
}
// Now that matching and cascading is done, clear the bits corresponding to
// those operations and compute the propagated restyle hint (unless we're
// not processing invalidations, in which case don't need to propagate it
// and must avoid clearing it).
let mut propagated_hint = if flags.contains(UnstyledOnly) {
RestyleHint::empty()
} else {
debug_assert!(flags.for_animation_only() ||
!data.restyle.hint.has_animation_hint(),
"animation restyle hint should be handled during \
animation-only restyles");
data.restyle.hint.propagate(&flags)
};
// FIXME(bholley): Need to handle explicitly-inherited reset properties
// somewhere.
propagated_hint.insert(hint);
trace!("propagated_hint={:?} \
is_display_none={:?}, implementing_pseudo={:?}",
propagated_hint,
data.styles.is_display_none(),
element.implemented_pseudo_element());
debug_assert!(element.has_current_styles_for_traversal(data, flags),
"Should have computed style or haven't yet valid computed \
style in case of animation-only restyle");
let has_dirty_descendants_for_this_restyle =
if flags.for_animation_only() {
element.has_animation_only_dirty_descendants()
} else {
element.has_dirty_descendants()
};
// Before examining each child individually, try to prove that our children
// don't need style processing. They need processing if any of the following
// conditions hold:
// * We have the dirty descendants bit.
// * We're propagating a hint.
// * This is the initial style.
// * We generated a reconstruct hint on self (which could mean that we
// switched from display:none to something else, which means the children
// need initial styling).
// * This is a servo non-incremental traversal.
//
// Additionally, there are a few scenarios where we avoid traversing the
// subtree even if descendant styles are out of date. These cases are
// enumerated in should_cull_subtree().
let mut traverse_children = has_dirty_descendants_for_this_restyle ||
!propagated_hint.is_empty() ||
context.thread_local.is_initial_style() ||
data.restyle.reconstructed_self() ||
is_servo_nonincremental_layout();
traverse_children = traverse_children &&
!traversal.should_cull_subtree(context, element, &data);
// Examine our children, and enqueue the appropriate ones for traversal.
if traverse_children {
note_children::<E, D, F>(
context,
element,
data,
propagated_hint,
data.restyle.reconstructed_self_or_ancestor(),
note_child
);
}
// If we are in a forgetful traversal, drop the existing restyle
// data here, since we won't need to perform a post-traversal to pick up
// any change hints.
if flags.contains(Forgetful) {
data.clear_restyle_flags_and_damage();
}
// Optionally clear the descendants bit for the traversal type we're in.
if flags.for_animation_only() {
if flags.contains(ClearAnimationOnlyDirtyDescendants) {
unsafe { element.unset_animation_only_dirty_descendants(); }
}
} else {
// There are two cases when we want to clear the dity descendants bit here
// after styling this element. The first case is when we were explicitly
// asked to clear the bit by the caller.
//
// The second case is when this element is the root of a display:none
// subtree, even if the style didn't change (since, if the style did change,
// we'd have already cleared it above).
//
// This keeps the tree in a valid state without requiring the DOM to check
// display:none on the parent when inserting new children (which can be
// moderately
|
root: OpaqueNode,
|
random_line_split
|
|
traversal.rs
|
let parent = node.traversal_parent().unwrap();
let remaining = parent.did_process_child();
if remaining!= 0 {
// The parent has other unprocessed descendants. We only
// perform postorder processing after the last descendant
// has been processed.
break
}
node = parent.as_node();
}
} else {
// Otherwise record the number of children to process when the time
// comes.
node.as_element().unwrap()
.store_children_to_process(children_to_process);
}
}
/// Style invalidations happen when traversing from a parent to its children.
/// However, this mechanism can't handle style invalidations on the root. As
/// such, we have a pre-traversal step to handle that part and determine whether
/// a full traversal is needed.
fn pre_traverse(
    root: E,
    shared_context: &SharedStyleContext,
    traversal_flags: TraversalFlags
) -> PreTraverseToken {
    // If this is an unstyled-only traversal, the caller has already verified
    // that there's something to traverse, and we don't need to do any
    // invalidation since we're not doing any restyling.
    if traversal_flags.contains(traversal_flags::UnstyledOnly) {
        return PreTraverseToken(true)
    }

    let flags = shared_context.traversal_flags;
    // Borrow the root's data mutably; the second binding re-borrows it as a
    // plain `&mut` so it can later be downgraded to a shared reference for
    // element_needs_traversal below.
    let mut data = root.mutate_data();
    let mut data = data.as_mut().map(|d| &mut **d);

    if let Some(ref mut data) = data {
        // Invalidate our style, and the one of our siblings and descendants
        // as needed.
        data.invalidate_style_if_needed(root, shared_context);
    };

    let parent = root.traversal_parent();
    let parent_data = parent.as_ref().and_then(|p| p.borrow_data());
    let should_traverse = Self::element_needs_traversal(
        root,
        flags,
        data.map(|d| &*d),
        parent_data.as_ref().map(|d| &**d)
    );

    PreTraverseToken(should_traverse)
}
/// Returns true if traversal should visit a text node. The style system
/// never processes text nodes, but Servo overrides this to visit them for
/// flow construction when necessary.
fn text_node_needs_traversal(node: E::ConcreteNode, _parent_data: &ElementData) -> bool {
    debug_assert!(node.is_text_node());
    // Default implementation: the style system itself never needs to visit
    // text nodes.
    false
}
/// Returns true if traversal is needed for the given element and subtree.
///
/// The caller passes |parent_data|, which is only null if there is no
/// parent.
fn element_needs_traversal(
    el: E,
    traversal_flags: TraversalFlags,
    data: Option<&ElementData>,
    parent_data: Option<&ElementData>,
) -> bool {
    debug!("element_needs_traversal({:?}, {:?}, {:?}, {:?})",
           el, traversal_flags, data, parent_data);

    // Unstyled-only traversals visit anything that lacks styles, or anything
    // whose dirty-descendants bit may hide unstyled descendants.
    if traversal_flags.contains(traversal_flags::UnstyledOnly) {
        return data.map_or(true, |d| !d.has_styles()) || el.has_dirty_descendants();
    }

    // In case of animation-only traversal we need to traverse the element
    // if the element has animation only dirty descendants bit,
    // animation-only restyle hint or recascade.
    if traversal_flags.for_animation_only() {
        // The has_styles() check short-circuits, so the unwrap() below can't
        // fail.
        return data.map_or(false, |d| d.has_styles()) &&
               (el.has_animation_only_dirty_descendants() ||
                data.as_ref().unwrap().restyle.hint.has_animation_hint_or_recascade());
    }

    // Non-incremental layout visits every node.
    if is_servo_nonincremental_layout() {
        return true;
    }

    // Unwrap the data; elements without up-to-date styles always need a
    // traversal.
    let data = match data {
        Some(d) if d.has_styles() => d,
        _ => return true,
    };

    // If the element is native-anonymous and an ancestor frame will be
    // reconstructed, the child and all its descendants will be destroyed.
    // In that case, we wouldn't need to traverse the subtree...
    //
    // Except if there could be transitions of pseudo-elements, in which
    // case we still need to process them, unfortunately.
    //
    // We need to conservatively continue the traversal to style the
    // pseudo-element in order to properly process potentially-new
    // transitions that we won't see otherwise.
    //
    // But it may be that we no longer match, so detect that case and act
    // appropriately here.
    if el.is_native_anonymous() {
        if let Some(parent_data) = parent_data {
            let going_to_reframe =
                parent_data.restyle.reconstructed_self_or_ancestor();

            let mut is_before_or_after_pseudo = false;
            if let Some(pseudo) = el.implemented_pseudo_element() {
                if pseudo.is_before_or_after() {
                    is_before_or_after_pseudo = true;
                    let still_match =
                        parent_data.styles.pseudos.get(&pseudo).is_some();

                    if !still_match {
                        debug_assert!(going_to_reframe,
                                      "We're removing a pseudo, so we \
                                       should reframe!");
                        return false;
                    }
                }
            }

            if going_to_reframe && !is_before_or_after_pseudo {
                debug!("Element {:?} is in doomed NAC subtree, \
                        culling traversal", el);
                return false;
            }
        }
    }

    // If the dirty descendants bit is set, we need to traverse no matter
    // what. Skip examining the ElementData.
    if el.has_dirty_descendants() {
        return true;
    }

    // If we have a restyle hint or need to recascade, we need to visit the
    // element.
    //
    // Note that this is different than checking has_current_styles_for_traversal(),
    // since that can return true even if we have a restyle hint indicating
    // that the element's descendants (but not necessarily the element) need
    // restyling.
    if !data.restyle.hint.is_empty() {
        return true;
    }

    // Servo uses the post-order traversal for flow construction, so we need
    // to traverse any element with damage so that we can perform fixup /
    // reconstruction on our way back up the tree.
    //
    // In aggressively forgetful traversals (where we seek out and clear damage
    // in addition to not computing it) we also need to traverse nodes with
    // explicit damage and no other restyle data, so that this damage can be cleared.
    if (cfg!(feature = "servo") ||
        traversal_flags.contains(traversal_flags::AggressivelyForgetful)) &&
       !data.restyle.damage.is_empty() {
        return true;
    }

    trace!("{:?} doesn't need traversal", el);
    false
}
/// Returns true if we want to cull this subtree from the travesal.
fn should_cull_subtree(
&self,
context: &mut StyleContext<E>,
parent: E,
parent_data: &ElementData,
) -> bool
|
// To avoid wasting work, we defer initial styling of XBL subtrees until
// frame construction, which does an explicit traversal of the unstyled
// children after shuffling the subtree. That explicit traversal may in
// turn find other bound elements, which get handled in the same way.
//
// We explicitly avoid handling restyles here (explicitly removing or
// changing bindings), since that adds complexity and is rarer. If it
// happens, we may just end up doing wasted work, since Gecko
// recursively drops Servo ElementData when the XBL insertion parent of
// an Element is changed.
if cfg!(feature = "gecko") && context.thread_local.is_initial_style() &&
parent_data.styles.primary().has_moz_binding()
{
debug!("Parent {:?} has XBL binding, deferring traversal", parent);
return true;
}
return false;
}
/// Return the shared style context common to all worker threads.
fn shared_context(&self) -> &SharedStyleContext;

/// Whether we're performing a parallel traversal.
///
/// NB: We do this check at runtime. We could guarantee correctness in this
/// regard via the type system via a `TraversalDriver` trait for this trait,
/// that could be one of two concrete types. It's not clear whether the
/// potential code size impact of that is worth it.
fn is_parallel(&self) -> bool;
}
/// Manually resolve style by sequentially walking up the parent chain to the
/// first styled Element, ignoring pending restyles. The resolved style is made
/// available via a callback, and can be dropped by the time this function
/// returns in the display:none subtree case.
pub fn resolve_style<E>(
    context: &mut StyleContext<E>,
    element: E,
    rule_inclusion: RuleInclusion,
    ignore_existing_style: bool,
) -> ElementStyles
where
    E: TElement,
{
    use style_resolver::StyleResolverForElement;
    // This entry point is only expected to be used when styles are not already
    // available through the regular traversal, or when the caller asked us to
    // ignore existing styles / resolve default styles only.
    debug_assert!(rule_inclusion == RuleInclusion::DefaultOnly ||
                  ignore_existing_style ||
                  element.borrow_data().map_or(true, |d| !d.has_styles()),
                  "Why are we here?");
    let mut ancestors_requiring_style_resolution = SmallVec::<[E; 16]>::new();
    // Clear the bloom filter, just in case the caller is reusing TLS.
    context.thread_local.bloom_filter.clear();
    // Walk up the parent chain, collecting unstyled ancestors, until we find
    // one whose primary style we are allowed to reuse (only when resolving
    // all rules and not ignoring existing styles).
    let mut style = None;
    let mut ancestor = element.traversal_parent();
    while let Some(current) = ancestor {
        if rule_inclusion == RuleInclusion::All && !ignore_existing_style {
            if let Some(data) = current.borrow_data() {
                if let Some(ancestor_style) = data.styles.get_primary() {
                    style = Some(ancestor_style.clone());
                    break;
                }
            }
        }
        ancestors_requiring_style_resolution.push(current);
        ancestor = current.traversal_parent();
    }
    // Seed the bloom filter from the first already-styled ancestor, if any,
    // so selector matching below starts from a consistent state.
    if let Some(ancestor) = ancestor {
        context.thread_local.bloom_filter.rebuild(ancestor);
        context.thread_local.bloom_filter.push(ancestor);
    }
    // Compute the layout-parent style by skipping over display:contents
    // ancestors (their style is not used as the layout parent).
    let mut layout_parent_style = style.clone();
    while let Some(style) = layout_parent_style.take() {
        if !style.is_display_contents() {
            layout_parent_style = Some(style);
            break;
        }
        ancestor = ancestor.unwrap().traversal_parent();
        layout_parent_style = ancestor.map(|a| {
            a.borrow_data().unwrap().styles.primary().clone()
        });
    }
    // Resolve the collected ancestors top-down (deepest last), threading the
    // parent / layout-parent styles through and keeping the bloom filter in
    // sync as we descend.
    for ancestor in ancestors_requiring_style_resolution.iter().rev() {
        context.thread_local.bloom_filter.assert_complete(*ancestor);
        let primary_style =
            StyleResolverForElement::new(*ancestor, context, rule_inclusion)
                .resolve_primary_style(
                    style.as_ref().map(|s| &**s),
                    layout_parent_style.as_ref().map(|s| &**s)
                );
        let is_display_contents = primary_style.style.is_display_contents();
        style = Some(primary_style.style);
        if !is_display_contents {
            layout_parent_style = style.clone();
        }
        context.thread_local.bloom_filter.push(*ancestor);
    }
    // Finally resolve the element itself against its now-styled ancestors.
    context.thread_local.bloom_filter.assert_complete(element);
    StyleResolverForElement::new(element, context, rule_inclusion)
        .resolve_style(
            style.as_ref().map(|s| &**s),
            layout_parent_style.as_ref().map(|s| &**s)
        )
}
/// Calculates the style for a single node.
///
/// Styles `element` if needed, computes the restyle hint to propagate to its
/// children, and calls `note_child` for each child the caller should enqueue
/// for traversal.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<E, D, F>(
    traversal: &D,
    traversal_data: &PerLevelTraversalData,
    context: &mut StyleContext<E>,
    element: E,
    data: &mut ElementData,
    note_child: F,
)
where
    E: TElement,
    D: DomTraversal<E>,
    F: FnMut(E::ConcreteNode),
{
    use traversal_flags::*;
    let flags = context.shared.traversal_flags;
    context.thread_local.begin_element(element, data);
    context.thread_local.statistics.elements_traversed += 1;
    debug_assert!(flags.intersects(AnimationOnly | UnstyledOnly) ||
                  !element.has_snapshot() || element.handled_snapshot(),
                  "Should've handled snapshots here already");
    let compute_self = !element.has_current_styles_for_traversal(data, flags);
    let mut hint = RestyleHint::empty();
    debug!("recalc_style_at: {:?} (compute_self={:?}, \
            dirty_descendants={:?}, data={:?})",
           element, compute_self, element.has_dirty_descendants(), data);
    // Compute style for this element if necessary.
    if compute_self {
        // The returned cascade requirement tells us how much of the subtree
        // must be recascaded as a consequence of restyling this element.
        match compute_style(traversal_data, context, element, data) {
            ChildCascadeRequirement::MustCascadeChildren => {
                hint |= RECASCADE_SELF;
            }
            ChildCascadeRequirement::MustCascadeDescendants => {
                hint |= RECASCADE_SELF | RECASCADE_DESCENDANTS;
            }
            ChildCascadeRequirement::CanSkipCascade => {}
        };
        // We must always cascade native anonymous subtrees, since they inherit
        // styles from their first non-NAC ancestor.
        if element.is_native_anonymous() {
            hint |= RECASCADE_SELF;
        }
        // If we're restyling this element to display:none, throw away all style
        // data in the subtree, notify the caller to early-return.
        if data.styles.is_display_none() {
            debug!("{:?} style is display:none - clearing data from descendants.",
                   element);
            clear_descendant_data(element)
        }
        // Inform any paint worklets of changed style, to speculatively
        // evaluate the worklet code. In the case that the size hasn't changed,
        // this will result in increased concurrency between script and layout.
        notify_paint_worklet(context, data);
    } else {
        debug_assert!(data.has_styles());
        data.restyle.set_traversed_without_styling();
    }
    // Now that matching and cascading is done, clear the bits corresponding to
    // those operations and compute the propagated restyle hint (unless we're
    // not processing invalidations, in which case don't need to propagate it
    // and must avoid clearing it).
    let mut propagated_hint = if flags.contains(UnstyledOnly) {
        RestyleHint::empty()
    } else {
        debug_assert!(flags.for_animation_only() ||
                      !data.restyle.hint.has_animation_hint(),
                      "animation restyle hint should be handled during \
                       animation-only restyles");
        data.restyle.hint.propagate(&flags)
    };
    // FIXME(bholley): Need to handle explicitly-inherited reset properties
    // somewhere.
    propagated_hint.insert(hint);
    trace!("propagated_hint={:?} \
            is_display_none={:?}, implementing_pseudo={:?}",
           propagated_hint,
           data.styles.is_display_none(),
           element.implemented_pseudo_element());
    debug_assert!(element.has_current_styles_for_traversal(data, flags),
                  "Should have computed style or haven't yet valid computed \
                   style in case of animation-only restyle");
    // Pick the dirty-descendants bit that matches the kind of traversal we
    // are running.
    let has_dirty_descendants_for_this_restyle =
        if flags.for_animation_only() {
            element.has_animation_only_dirty_descendants()
        } else {
            element.has_dirty_descendants()
        };
    // Before examining each child individually, try to prove that our children
    // don't need style processing. They need processing if any of the following
    // conditions hold:
    // * We have the dirty descendants bit.
    // * We're propagating a hint.
    // * This is the initial style.
    // * We generated a reconstruct hint on self (which could mean that we
    //   switched from display:none to something else, which means the children
    //   need initial styling).
    // * This is a servo non-incremental traversal.
    //
    // Additionally, there are a few scenarios where we avoid traversing the
    // subtree even if descendant styles are out of date. These cases are
    // enumerated in should_cull_subtree().
    let mut traverse_children = has_dirty_descendants_for_this_restyle ||
        !propagated_hint.is_empty() ||
        context.thread_local.is_initial_style() ||
        data.restyle.reconstructed_self() ||
        is_servo_nonincremental_layout();
    traverse_children = traverse_children &&
        !traversal.should_cull_subtree(context, element, &data);
    // Examine our children, and enqueue the appropriate ones for traversal.
    if traverse_children {
        note_children::<E, D, F>(
            context,
            element,
            data,
            propagated_hint,
            data.restyle.reconstructed_self_or_ancestor(),
            note_child
        );
    }
    // If we are in a forgetful traversal, drop the existing restyle
    // data here, since we won't need to perform a post-traversal to pick up
    // any change hints.
    if flags.contains(Forgetful) {
        data.clear_restyle_flags_and_damage();
    }
    // Optionally clear the descendants bit for the traversal type we're in.
    if flags.for_animation_only() {
        if flags.contains(ClearAnimationOnlyDirtyDescendants) {
            unsafe { element.unset_animation_only_dirty_descendants(); }
        }
    } else {
        // There are two cases when we want to clear the dirty descendants bit
        // here after styling this element. The first case is when we were
        // explicitly asked to clear the bit by the caller.
        //
        // The second case is when this element is the root of a display:none
        // subtree, even if the style didn't change (since, if the style did change,
        // we'd have already cleared it above).
        //
        // This keeps the tree in a valid state without requiring the DOM to check
        // display:none on the parent when inserting new children (which can be
        // moderately expensive). Instead, DOM implementations can unconditionally
        // set the dirty descendants bit on any styled parent, and let the traversal
        // sort it out.
        if flags.contains(ClearDirtyDescendants) ||
           data.styles.is_display_none() {
            unsafe { element.unset_dirty_descendants(); }
        }
    }
    context.thread_local.end_element(element);
}
fn compute_style<E>(
traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>,
element: E,
data: &mut ElementData
) -> ChildCascadeRequirement
where
E: TElement,
{
use data::RestyleKind::*;
use sharing::StyleSharingResult::*;
context.thread_local.statistics.elements_styled += 1;
let kind = data.restyle
|
{
debug_assert!(cfg!(feature = "gecko") ||
parent.has_current_styles_for_traversal(parent_data, context.shared.traversal_flags));
// If the parent computed display:none, we don't style the subtree.
if parent_data.styles.is_display_none() {
debug!("Parent {:?} is display:none, culling traversal", parent);
return true;
}
// Gecko-only XBL handling.
//
// If we're computing initial styles and the parent has a Gecko XBL
// binding, that binding may inject anonymous children and remap the
// explicit children to an insertion point (or hide them entirely). It
// may also specify a scoped stylesheet, which changes the rules that
// apply within the subtree. These two effects can invalidate the result
// of property inheritance and selector matching (respectively) within
// the subtree.
//
|
identifier_body
|
traversal.rs
|
let parent = node.traversal_parent().unwrap();
let remaining = parent.did_process_child();
if remaining!= 0 {
// The parent has other unprocessed descendants. We only
// perform postorder processing after the last descendant
// has been processed.
break
}
node = parent.as_node();
}
} else {
// Otherwise record the number of children to process when the time
// comes.
node.as_element().unwrap()
.store_children_to_process(children_to_process);
}
}
/// Style invalidations happen when traversing from a parent to its children.
/// However, this mechanism can't handle style invalidations on the root. As
/// such, we have a pre-traversal step to handle that part and determine whether
/// a full traversal is needed.
fn
|
(
root: E,
shared_context: &SharedStyleContext,
traversal_flags: TraversalFlags
) -> PreTraverseToken {
// If this is an unstyled-only traversal, the caller has already verified
// that there's something to traverse, and we don't need to do any
// invalidation since we're not doing any restyling.
if traversal_flags.contains(traversal_flags::UnstyledOnly) {
return PreTraverseToken(true)
}
let flags = shared_context.traversal_flags;
let mut data = root.mutate_data();
let mut data = data.as_mut().map(|d| &mut **d);
if let Some(ref mut data) = data {
// Invalidate our style, and the one of our siblings and descendants
// as needed.
data.invalidate_style_if_needed(root, shared_context);
};
let parent = root.traversal_parent();
let parent_data = parent.as_ref().and_then(|p| p.borrow_data());
let should_traverse = Self::element_needs_traversal(
root,
flags,
data.map(|d| &*d),
parent_data.as_ref().map(|d| &**d)
);
PreTraverseToken(should_traverse)
}
/// Returns true if traversal should visit a text node. The style system
/// never processes text nodes, but Servo overrides this to visit them for
/// flow construction when necessary.
fn text_node_needs_traversal(node: E::ConcreteNode, _parent_data: &ElementData) -> bool {
    debug_assert!(node.is_text_node());
    // Default implementation: the style system itself never visits text
    // nodes; overriders may return true based on the parent's data.
    false
}
/// Returns true if traversal is needed for the given element and subtree.
///
/// The caller passes |parent_data|, which is only null if there is no
/// parent.
fn element_needs_traversal(
    el: E,
    traversal_flags: TraversalFlags,
    data: Option<&ElementData>,
    parent_data: Option<&ElementData>,
) -> bool {
    debug!("element_needs_traversal({:?}, {:?}, {:?}, {:?})",
           el, traversal_flags, data, parent_data);
    // Unstyled-only traversals only need to visit elements with no styles
    // yet, or elements whose descendants may be unstyled.
    if traversal_flags.contains(traversal_flags::UnstyledOnly) {
        return data.map_or(true, |d| !d.has_styles()) || el.has_dirty_descendants();
    }
    // In case of animation-only traversal we need to traverse the element
    // if the element has animation only dirty descendants bit,
    // animation-only restyle hint or recascade.
    if traversal_flags.for_animation_only() {
        return data.map_or(false, |d| d.has_styles()) &&
               (el.has_animation_only_dirty_descendants() ||
                data.as_ref().unwrap().restyle.hint.has_animation_hint_or_recascade());
    }
    // Non-incremental layout visits every node.
    if is_servo_nonincremental_layout() {
        return true;
    }
    // Unwrap the data. An element with no data, or with data but no styles,
    // has never been styled and always needs traversal.
    let data = match data {
        Some(d) if d.has_styles() => d,
        _ => return true,
    };
    // If the element is native-anonymous and an ancestor frame will be
    // reconstructed, the child and all its descendants will be destroyed.
    // In that case, we wouldn't need to traverse the subtree...
    //
    // Except if there could be transitions of pseudo-elements, in which
    // case we still need to process them, unfortunately.
    //
    // We need to conservatively continue the traversal to style the
    // pseudo-element in order to properly process potentially-new
    // transitions that we won't see otherwise.
    //
    // But it may be that we no longer match, so detect that case and act
    // appropriately here.
    if el.is_native_anonymous() {
        if let Some(parent_data) = parent_data {
            let going_to_reframe =
                parent_data.restyle.reconstructed_self_or_ancestor();
            let mut is_before_or_after_pseudo = false;
            if let Some(pseudo) = el.implemented_pseudo_element() {
                if pseudo.is_before_or_after() {
                    is_before_or_after_pseudo = true;
                    // The pseudo still "matches" only if the parent still
                    // carries a style for it.
                    let still_match =
                        parent_data.styles.pseudos.get(&pseudo).is_some();
                    if !still_match {
                        debug_assert!(going_to_reframe,
                                      "We're removing a pseudo, so we \
                                       should reframe!");
                        return false;
                    }
                }
            }
            if going_to_reframe && !is_before_or_after_pseudo {
                debug!("Element {:?} is in doomed NAC subtree, \
                        culling traversal", el);
                return false;
            }
        }
    }
    // If the dirty descendants bit is set, we need to traverse no matter
    // what. Skip examining the ElementData.
    if el.has_dirty_descendants() {
        return true;
    }
    // If we have a restyle hint or need to recascade, we need to visit the
    // element.
    //
    // Note that this is different than checking has_current_styles_for_traversal(),
    // since that can return true even if we have a restyle hint indicating
    // that the element's descendants (but not necessarily the element) need
    // restyling.
    if !data.restyle.hint.is_empty() {
        return true;
    }
    // Servo uses the post-order traversal for flow construction, so we need
    // to traverse any element with damage so that we can perform fixup /
    // reconstruction on our way back up the tree.
    //
    // In aggressively forgetful traversals (where we seek out and clear damage
    // in addition to not computing it) we also need to traverse nodes with
    // explicit damage and no other restyle data, so that this damage can be cleared.
    if (cfg!(feature = "servo") ||
        traversal_flags.contains(traversal_flags::AggressivelyForgetful)) &&
       !data.restyle.damage.is_empty() {
        return true;
    }
    trace!("{:?} doesn't need traversal", el);
    false
}
/// Returns true if we want to cull this subtree from the traversal.
fn should_cull_subtree(
    &self,
    context: &mut StyleContext<E>,
    parent: E,
    parent_data: &ElementData,
) -> bool {
    // Outside of Gecko builds, the parent must already have current styles
    // by the time we consider culling its children.
    debug_assert!(cfg!(feature = "gecko") ||
                  parent.has_current_styles_for_traversal(parent_data, context.shared.traversal_flags));
    // If the parent computed display:none, we don't style the subtree.
    if parent_data.styles.is_display_none() {
        debug!("Parent {:?} is display:none, culling traversal", parent);
        return true;
    }
    // Gecko-only XBL handling.
    //
    // If we're computing initial styles and the parent has a Gecko XBL
    // binding, that binding may inject anonymous children and remap the
    // explicit children to an insertion point (or hide them entirely). It
    // may also specify a scoped stylesheet, which changes the rules that
    // apply within the subtree. These two effects can invalidate the result
    // of property inheritance and selector matching (respectively) within
    // the subtree.
    //
    // To avoid wasting work, we defer initial styling of XBL subtrees until
    // frame construction, which does an explicit traversal of the unstyled
    // children after shuffling the subtree. That explicit traversal may in
    // turn find other bound elements, which get handled in the same way.
    //
    // We explicitly avoid handling restyles here (explicitly removing or
    // changing bindings), since that adds complexity and is rarer. If it
    // happens, we may just end up doing wasted work, since Gecko
    // recursively drops Servo ElementData when the XBL insertion parent of
    // an Element is changed.
    if cfg!(feature = "gecko") && context.thread_local.is_initial_style() &&
       parent_data.styles.primary().has_moz_binding()
    {
        debug!("Parent {:?} has XBL binding, deferring traversal", parent);
        return true;
    }
    return false;
}
/// Return the shared style context common to all worker threads.
fn shared_context(&self) -> &SharedStyleContext;
/// Whether we're performing a parallel traversal.
///
/// NB: We do this check on runtime. We could guarantee correctness in this
/// regard via the type system via a `TraversalDriver` trait for this trait,
/// that could be one of two concrete types. It's not clear whether the
/// potential code size impact of that is worth it.
fn is_parallel(&self) -> bool;
}
/// Manually resolve style by sequentially walking up the parent chain to the
/// first styled Element, ignoring pending restyles. The resolved style is made
/// available via a callback, and can be dropped by the time this function
/// returns in the display:none subtree case.
pub fn resolve_style<E>(
    context: &mut StyleContext<E>,
    element: E,
    rule_inclusion: RuleInclusion,
    ignore_existing_style: bool,
) -> ElementStyles
where
    E: TElement,
{
    use style_resolver::StyleResolverForElement;
    // This entry point is only expected to be used when styles are not already
    // available through the regular traversal, or when the caller asked us to
    // ignore existing styles / resolve default styles only.
    debug_assert!(rule_inclusion == RuleInclusion::DefaultOnly ||
                  ignore_existing_style ||
                  element.borrow_data().map_or(true, |d| !d.has_styles()),
                  "Why are we here?");
    let mut ancestors_requiring_style_resolution = SmallVec::<[E; 16]>::new();
    // Clear the bloom filter, just in case the caller is reusing TLS.
    context.thread_local.bloom_filter.clear();
    // Walk up the parent chain, collecting unstyled ancestors, until we find
    // one whose primary style we are allowed to reuse (only when resolving
    // all rules and not ignoring existing styles).
    let mut style = None;
    let mut ancestor = element.traversal_parent();
    while let Some(current) = ancestor {
        if rule_inclusion == RuleInclusion::All && !ignore_existing_style {
            if let Some(data) = current.borrow_data() {
                if let Some(ancestor_style) = data.styles.get_primary() {
                    style = Some(ancestor_style.clone());
                    break;
                }
            }
        }
        ancestors_requiring_style_resolution.push(current);
        ancestor = current.traversal_parent();
    }
    // Seed the bloom filter from the first already-styled ancestor, if any,
    // so selector matching below starts from a consistent state.
    if let Some(ancestor) = ancestor {
        context.thread_local.bloom_filter.rebuild(ancestor);
        context.thread_local.bloom_filter.push(ancestor);
    }
    // Compute the layout-parent style by skipping over display:contents
    // ancestors (their style is not used as the layout parent).
    let mut layout_parent_style = style.clone();
    while let Some(style) = layout_parent_style.take() {
        if !style.is_display_contents() {
            layout_parent_style = Some(style);
            break;
        }
        ancestor = ancestor.unwrap().traversal_parent();
        layout_parent_style = ancestor.map(|a| {
            a.borrow_data().unwrap().styles.primary().clone()
        });
    }
    // Resolve the collected ancestors top-down (deepest last), threading the
    // parent / layout-parent styles through and keeping the bloom filter in
    // sync as we descend.
    for ancestor in ancestors_requiring_style_resolution.iter().rev() {
        context.thread_local.bloom_filter.assert_complete(*ancestor);
        let primary_style =
            StyleResolverForElement::new(*ancestor, context, rule_inclusion)
                .resolve_primary_style(
                    style.as_ref().map(|s| &**s),
                    layout_parent_style.as_ref().map(|s| &**s)
                );
        let is_display_contents = primary_style.style.is_display_contents();
        style = Some(primary_style.style);
        if !is_display_contents {
            layout_parent_style = style.clone();
        }
        context.thread_local.bloom_filter.push(*ancestor);
    }
    // Finally resolve the element itself against its now-styled ancestors.
    context.thread_local.bloom_filter.assert_complete(element);
    StyleResolverForElement::new(element, context, rule_inclusion)
        .resolve_style(
            style.as_ref().map(|s| &**s),
            layout_parent_style.as_ref().map(|s| &**s)
        )
}
/// Calculates the style for a single node.
///
/// Styles `element` if needed, computes the restyle hint to propagate to its
/// children, and calls `note_child` for each child the caller should enqueue
/// for traversal.
#[inline]
#[allow(unsafe_code)]
pub fn recalc_style_at<E, D, F>(
    traversal: &D,
    traversal_data: &PerLevelTraversalData,
    context: &mut StyleContext<E>,
    element: E,
    data: &mut ElementData,
    note_child: F,
)
where
    E: TElement,
    D: DomTraversal<E>,
    F: FnMut(E::ConcreteNode),
{
    use traversal_flags::*;
    let flags = context.shared.traversal_flags;
    context.thread_local.begin_element(element, data);
    context.thread_local.statistics.elements_traversed += 1;
    debug_assert!(flags.intersects(AnimationOnly | UnstyledOnly) ||
                  !element.has_snapshot() || element.handled_snapshot(),
                  "Should've handled snapshots here already");
    let compute_self = !element.has_current_styles_for_traversal(data, flags);
    let mut hint = RestyleHint::empty();
    debug!("recalc_style_at: {:?} (compute_self={:?}, \
            dirty_descendants={:?}, data={:?})",
           element, compute_self, element.has_dirty_descendants(), data);
    // Compute style for this element if necessary.
    if compute_self {
        // The returned cascade requirement tells us how much of the subtree
        // must be recascaded as a consequence of restyling this element.
        match compute_style(traversal_data, context, element, data) {
            ChildCascadeRequirement::MustCascadeChildren => {
                hint |= RECASCADE_SELF;
            }
            ChildCascadeRequirement::MustCascadeDescendants => {
                hint |= RECASCADE_SELF | RECASCADE_DESCENDANTS;
            }
            ChildCascadeRequirement::CanSkipCascade => {}
        };
        // We must always cascade native anonymous subtrees, since they inherit
        // styles from their first non-NAC ancestor.
        if element.is_native_anonymous() {
            hint |= RECASCADE_SELF;
        }
        // If we're restyling this element to display:none, throw away all style
        // data in the subtree, notify the caller to early-return.
        if data.styles.is_display_none() {
            debug!("{:?} style is display:none - clearing data from descendants.",
                   element);
            clear_descendant_data(element)
        }
        // Inform any paint worklets of changed style, to speculatively
        // evaluate the worklet code. In the case that the size hasn't changed,
        // this will result in increased concurrency between script and layout.
        notify_paint_worklet(context, data);
    } else {
        debug_assert!(data.has_styles());
        data.restyle.set_traversed_without_styling();
    }
    // Now that matching and cascading is done, clear the bits corresponding to
    // those operations and compute the propagated restyle hint (unless we're
    // not processing invalidations, in which case don't need to propagate it
    // and must avoid clearing it).
    let mut propagated_hint = if flags.contains(UnstyledOnly) {
        RestyleHint::empty()
    } else {
        debug_assert!(flags.for_animation_only() ||
                      !data.restyle.hint.has_animation_hint(),
                      "animation restyle hint should be handled during \
                       animation-only restyles");
        data.restyle.hint.propagate(&flags)
    };
    // FIXME(bholley): Need to handle explicitly-inherited reset properties
    // somewhere.
    propagated_hint.insert(hint);
    trace!("propagated_hint={:?} \
            is_display_none={:?}, implementing_pseudo={:?}",
           propagated_hint,
           data.styles.is_display_none(),
           element.implemented_pseudo_element());
    debug_assert!(element.has_current_styles_for_traversal(data, flags),
                  "Should have computed style or haven't yet valid computed \
                   style in case of animation-only restyle");
    // Pick the dirty-descendants bit that matches the kind of traversal we
    // are running.
    let has_dirty_descendants_for_this_restyle =
        if flags.for_animation_only() {
            element.has_animation_only_dirty_descendants()
        } else {
            element.has_dirty_descendants()
        };
    // Before examining each child individually, try to prove that our children
    // don't need style processing. They need processing if any of the following
    // conditions hold:
    // * We have the dirty descendants bit.
    // * We're propagating a hint.
    // * This is the initial style.
    // * We generated a reconstruct hint on self (which could mean that we
    //   switched from display:none to something else, which means the children
    //   need initial styling).
    // * This is a servo non-incremental traversal.
    //
    // Additionally, there are a few scenarios where we avoid traversing the
    // subtree even if descendant styles are out of date. These cases are
    // enumerated in should_cull_subtree().
    let mut traverse_children = has_dirty_descendants_for_this_restyle ||
        !propagated_hint.is_empty() ||
        context.thread_local.is_initial_style() ||
        data.restyle.reconstructed_self() ||
        is_servo_nonincremental_layout();
    traverse_children = traverse_children &&
        !traversal.should_cull_subtree(context, element, &data);
    // Examine our children, and enqueue the appropriate ones for traversal.
    if traverse_children {
        note_children::<E, D, F>(
            context,
            element,
            data,
            propagated_hint,
            data.restyle.reconstructed_self_or_ancestor(),
            note_child
        );
    }
    // If we are in a forgetful traversal, drop the existing restyle
    // data here, since we won't need to perform a post-traversal to pick up
    // any change hints.
    if flags.contains(Forgetful) {
        data.clear_restyle_flags_and_damage();
    }
    // Optionally clear the descendants bit for the traversal type we're in.
    if flags.for_animation_only() {
        if flags.contains(ClearAnimationOnlyDirtyDescendants) {
            unsafe { element.unset_animation_only_dirty_descendants(); }
        }
    } else {
        // There are two cases when we want to clear the dirty descendants bit
        // here after styling this element. The first case is when we were
        // explicitly asked to clear the bit by the caller.
        //
        // The second case is when this element is the root of a display:none
        // subtree, even if the style didn't change (since, if the style did change,
        // we'd have already cleared it above).
        //
        // This keeps the tree in a valid state without requiring the DOM to check
        // display:none on the parent when inserting new children (which can be
        // moderately expensive). Instead, DOM implementations can unconditionally
        // set the dirty descendants bit on any styled parent, and let the traversal
        // sort it out.
        if flags.contains(ClearDirtyDescendants) ||
           data.styles.is_display_none() {
            unsafe { element.unset_dirty_descendants(); }
        }
    }
    context.thread_local.end_element(element);
}
fn compute_style<E>(
traversal_data: &PerLevelTraversalData,
context: &mut StyleContext<E>,
element: E,
data: &mut ElementData
) -> ChildCascadeRequirement
where
E: TElement,
{
use data::RestyleKind::*;
use sharing::StyleSharingResult::*;
context.thread_local.statistics.elements_styled += 1;
let kind = data.restyle
|
pre_traverse
|
identifier_name
|
16853_H1.rs
|
Per(s)PSA @ damp 02% PSA @ damp 05% PSA @ damp 07% PSA @ damp 10% PSA @ damp 20% PSA @ damp 30% (m/s/s)
0.000 9.4427940E-002 9.4423000E-002 9.4423000E-002 9.4423000E-002 9.4423000E-002 9.4423000E-002
0.010 9.5077590E-002 9.4428840E-002 9.4429890E-002 9.4431070E-002 9.4431960E-002 9.4428650E-002
0.020 9.5591330E-002 9.4625260E-002 9.4620050E-002 9.4612840E-002 9.4594430E-002 9.4568770E-002
0.030 9.5749930E-002 9.5000390E-002 9.4967740E-002 9.4936250E-002 9.4869930E-002 9.4800450E-002
0.040 9.8745260E-002 9.5374380E-002 9.5329660E-002 9.5295420E-002 9.5214410E-002 9.5103240E-002
0.050 9.6976610E-002 9.5949570E-002 9.5942870E-002 9.5893950E-002 9.5705000E-002 9.5493090E-002
0.075 1.1531181E-001 9.8418660E-002 9.8138930E-002 9.7762490E-002 9.6919100E-002 9.6423990E-002
0.100 1.2062754E-001 9.4149200E-002 9.4372430E-002 9.4898500E-002 9.6941000E-002 9.7405860E-002
0.110 1.0883858E-001 1.0466158E-001 1.0242570E-001 1.0061959E-001 9.8996480E-002 9.8575940E-002
0.120 1.1245289E-001 1.1175528E-001 1.0799459E-001 1.0496579E-001 1.0139600E-001 1.0001275E-001
0.130 1.1015215E-001 1.0287407E-001 1.0460675E-001 1.0488870E-001 1.0318545E-001 1.0145257E-001
0.140 1.6039297E-001 1.0596312E-001 1.0548107E-001 1.0560759E-001 1.0487497E-001 1.0288734E-001
0.150 1.2498645E-001 1.0674112E-001 1.0796427E-001 1.0857921E-001 1.0699045E-001 1.0436486E-001
0.160 1.3663658E-001 1.2576041E-001 1.1889431E-001 1.1454626E-001 1.0941206E-001 1.0585318E-001
0.170 1.8114334E-001 1.1299922E-001 1.1594012E-001 1.1643350E-001 1.1174187E-001 1.0726032E-001
0.180 1.8843634E-001 1.2634608E-001 1.2433743E-001 1.2162153E-001 1.1386395E-001 1.0850130E-001
0.190 2.4428245E-001 1.4613588E-001 1.3617998E-001 1.2763548E-001 1.1544209E-001 1.0951513E-001
0.200 2.8676498E-001 1.4930499E-001 1.3740356E-001 1.2805711E-001 1.1609120E-001 1.1030254E-001
0.220 2.6867819E-001 1.8119521E-001 1.6100505E-001 1.4197567E-001 1.1538588E-001 1.1160781E-001
0.240 3.5831174E-001 1.6528116E-001 1.5364350E-001 1.3919891E-001 1.1542684E-001 1.1361472E-001
0.260 3.6949748E-001 2.0494702E-001 1.8592773E-001 1.6347633E-001 1.2068838E-001 1.1725112E-001
0.280 6.0108298E-001 2.5464493E-001 2.2191884E-001 1.8646128E-001 1.3218696E-001 1.2230708E-001
0.300 4.7455946E-001 2.6836160E-001 2.3960426E-001 1.9853677E-001 1.4644003E-001 1.2759830E-001
0.320 7.4330324E-001 3.3365777E-001 2.6509777E-001 2.2204168E-001 1.6004242E-001 1.3172095E-001
0.340 4.7217992E-001 3.6261472E-001 3.1050006E-001 2.5464940E-001 1.6835248E-001 1.3359098E-001
0.360 3.8319096E-001 4.0063030E-001 3.0961069E-001 2.5493053E-001 1.6886139E-001 1.3288547E-001
0.380 3.9555305E-001 3.5029444E-001 2.9184261E-001 2.3068261E-001 1.6293937E-001 1.3001145E-001
0.400 3.1193888E-001 3.1979039E-001 2.7496523E-001 2.2115907E-001 1.5350646E-001 1.2580371E-001
0.420 2.4612994E-001 2.9751867E-001 2.5141686E-001 2.0318647E-001 1.4291930E-001 1.2107600E-001
0.440 2.7351367E-001 2.3752791E-001 2.1014613E-001 1.7755309E-001 1.3284960E-001 1.1653665E-001
0.460 2.7012473E-001 1.8326020E-001 1.7182265E-001 1.5363026E-001 1.2481420E-001 1.1260331E-001
0.480 2.4799959E-001 1.8878539E-001 1.5952496E-001 1.3872321E-001 1.1993241E-001 1.0946549E-001
0.500 2.7150074E-001 1.8099619E-001 1.5631880E-001 1.3197145E-001 1.1823482E-001 1.0709962E-001
|
0.650 2.1797280E-001 1.8877083E-001 1.7303199E-001 1.5523672E-001 1.1628085E-001 9.2407720E-002
0.700 1.4387584E-001 2.1101710E-001 1.7496218E-001 1.4056417E-001 1.0735576E-001 8.4936020E-002
0.750 2.1221133E-001 1.6501458E-001 1.4778031E-001 1.3065909E-001 9.7020360E-002 7.7791750E-002
0.800 2.4024567E-001 1.3203551E-001 1.2476300E-001 1.1721472E-001 9.6638020E-002 7.8506170E-002
0.850 2.6158634E-001 1.6090111E-001 1.4763765E-001 1.3271925E-001 9.8795340E-002 7.7857970E-002
0.900 3.2579094E-001 1.8892585E-001 1.6800646E-001 1.4374727E-001 9.8360940E-002 7.5836030E-002
0.950 2.0211270E-001 1.9576155E-001 1.6988325E-001 1.4206217E-001 9.4453400E-002 7.2609440E-002
1.000 1.2281635E-001 1.9655029E-001 1.5892756E-001 1.2749888E-001 8.7660710E-002 6.8579220E-002
1.100 1.0884829E-001 1.6333638E-001 1.4578114E-001 1.2460196E-001 8.2669110E-002 6.0797080E-002
1.200 1.4473225E-001 1.1467690E-001 1.1025990E-001 1.0259250E-001 7.6106900E-002 5.7322580E-002
1.300 1.1749568E-001 1.0529593E-001 1.0082978E-001 9.2977890E-002 6.8447830E-002 5.4146130E-002
1.400 9.8597820E-002 1.1508367E-001 1.0183192E-001 8.7511700E-002 6.0750530E-002 5.0988340E-002
1.500 1.0426316E-001 9.2942090E-002 8.2913580E-002 7.2584140E-002 5.6679380E-002 4.7804990E-002
1.600 1.0948269E-001 8.4281690E-002 7.7112080E-002 6.8366100E-002 5.2427970E-002 4.4699470E-002
1.700 1.2348919E-001 8.6900290E-002 7.8160320E-002 6.7807030E-002 4.9545370E-002 4.1763870E-002
1.800 1.3053462E-001 8.2094100E-002 7.4269560E-002 6.4578660E-002 4.7759380E-002 3.8871200E-002
1.900 8.5627760E-002 9.2201760E-002 7.9802510E-002 6.6791880E-002 4.5371000E-002 3.5802240E-002
2.000 5.5560060E-002 9.1473710E-002 7.7998710E-002 6.3743940E-002 4.1338440E-002 3.4232910E-002
2.200 4.6331560E-002 6.3030120E-002 5.3000380E-002 4.7489860E-002 3.7152610E-002 3.1342600E-002
2.400 3.2608640E-002 4.3966040E-002 4.0873300E-002 3.7735950E-002 3.1885800E-002 2.7965410E-002
2.600 5.2213990E-002 3.5629800E-002 3.0792830E-002 2.7902060E-002 2.7370990E-002 2.4910060E-002
2.800 4.2672350E-002 2.8944080E-002 2.7223890E-002 2.5551120E-002 2.4590340E-002 2.2464620E-002
3.000 3.4073620E-002 3.5273820E-002 2.9662590E-002 2.6169930E-002 2.2801780E-002 2.0511940E-002
3.200 2.5034220E-002 2.9120490E-002 2.7138020E-002 2.4994530E-002 2.1122620E-002 1.8855530E-002
3.400 2.3061410E-002 2.5553180E-002 2.3049050E-002 2.1976190E-002 1.9362510E-002 1.7381030E-002
3.600 2.4168030E-002 2.1682010E-002 1.9482470E-002 1.8977770E-002 1.7738650E-002 1.6064410E-002
3.800 3.1657720E-002 1.9889630E-002 1.8580320E-002 1.7326040E-002 1.6427010E-002 1.4899440E-002
4.000 4.0096860E-002 2.2407680E-002 2.0734300E-002 1.8286550E-002 1.5396490E-002 1.3863560E-002
4.200 4.0433120E-002 2.5857150E-002 2.2678820E-002 1.8864600E-002 1.4502740E-002 1.2921090E-002
4.400 3.2261790E-002 2.7998450E-002 2.3126290E-002 1.8211300E-002 1.3619590E-002 1.2042460E-002
4.600 2.1424730E-002 2.6222210E-002 2.2056670E-002 1.7904330E-002 1.2702650E-002 1.1210660E-002
4.800 1.4190690E-002 2.2940540E-002 1.9974700E-002 1.6741140E-002 1.1777480E-002 1.0422550E-002
5.000 1.7715990E-002 1.8192830E-002 1.6917750E-002 1.5063780E-002 1.0892670E-002 9.6794700E-003
5.500 1.7830570E-002 1.2984720E-002 1.2662700E-002 1.1859240E-002 9.0320200E-003 8.0245500E-003
6.000 1.2652570E-002 1.3827240E-002 1.2079090E-002 1.0215070E-002 7.5218900E-003 6.6082400E-003
6.500 1.0224530E-002 1.3029930E-002 1.1032190E-002 9.0453600E-003 6.2045600E-003 5.3621100E-003
7.000 7.7846400E-003 9.9548500E-003 8.8900500E-003 7.6258300E-003 5.3853700E-003 4.3119600E-003
7.500 6.6763900E-003 7.9839800E-003 7.3638300E-003 6.5447800E-003 4.6090000E-003 3.7133900E-003
8.000 6.0072400E-003 6.5414200E-003 5.8798500E-003 5.2663300E-003 3.9152400E-003 3.1486900E-003
8.500 5.4644900E-003 5.7480600E-003 5.1834400E-003 4.5316400E-003 3.5816200E-003 2.9282600E-003
9.000 5.8168700E-003 5.1265100E-003 4.6232900E-003 4.1658500E-003 3.3338900E-003 2.7751400E-003
9.500 5.4644900E-003 4.4708400E-003 4.1621400E-003 3.8724500E-003 3.0659100E-003 2.6300100E-003
10.00 5.8168700E-003 4.6010800E-003 4.0257000E-003 3.5561700E-003 2.7891800E-003 2.4935000E-003
-1 9.4633770E-002 1.4057537E-002 1.4057537E-002 1.4057537E-002 1.4057537E-002 1.4057537E-002
|
0.550 2.2443265E-001 2.0542115E-001 1.7928344E-001 1.5216398E-001 1.2084850E-001 1.0293319E-001
0.600 3.2437012E-001 2.1653445E-001 1.8949097E-001 1.6210338E-001 1.2160170E-001 9.8512280E-002
|
random_line_split
|
wl_buffer.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_BUFFER_DESTROY: uint32_t = 0;
#[inline(always)]
pub unsafe fn wl_buffer_add_listener(
wl_buffer: *mut objects::wl_buffer,
listener: *const listeners::wl_buffer_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
wl_buffer as *mut objects::wl_proxy,
listener as *mut extern fn(),
data
)
}
#[inline(always)]
pub unsafe fn
|
(
wl_buffer: *mut objects::wl_buffer,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_buffer as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn wl_buffer_get_user_data(
wl_buffer: *mut objects::wl_buffer
) -> *mut c_void {
raw::wl_proxy_get_user_data(wl_buffer as *mut objects::wl_proxy)
}
#[inline(always)]
pub unsafe fn wl_buffer_destroy(
wl_buffer: *mut objects::wl_buffer
) {
raw::wl_proxy_marshal(
wl_buffer as *mut objects::wl_proxy,
WL_BUFFER_DESTROY
);
raw::wl_proxy_destroy(wl_buffer as *mut objects::wl_proxy)
}
|
wl_buffer_set_user_data
|
identifier_name
|
wl_buffer.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_BUFFER_DESTROY: uint32_t = 0;
#[inline(always)]
pub unsafe fn wl_buffer_add_listener(
wl_buffer: *mut objects::wl_buffer,
listener: *const listeners::wl_buffer_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
|
listener as *mut extern fn(),
data
)
}
#[inline(always)]
pub unsafe fn wl_buffer_set_user_data(
wl_buffer: *mut objects::wl_buffer,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_buffer as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn wl_buffer_get_user_data(
wl_buffer: *mut objects::wl_buffer
) -> *mut c_void {
raw::wl_proxy_get_user_data(wl_buffer as *mut objects::wl_proxy)
}
#[inline(always)]
pub unsafe fn wl_buffer_destroy(
wl_buffer: *mut objects::wl_buffer
) {
raw::wl_proxy_marshal(
wl_buffer as *mut objects::wl_proxy,
WL_BUFFER_DESTROY
);
raw::wl_proxy_destroy(wl_buffer as *mut objects::wl_proxy)
}
|
wl_buffer as *mut objects::wl_proxy,
|
random_line_split
|
wl_buffer.rs
|
// Copyright 2014 Jonathan Eyolfson
use libc::{c_int, c_void, uint32_t};
use raw;
use raw::types::listeners;
use raw::types::objects;
pub const WL_BUFFER_DESTROY: uint32_t = 0;
#[inline(always)]
pub unsafe fn wl_buffer_add_listener(
wl_buffer: *mut objects::wl_buffer,
listener: *const listeners::wl_buffer_listener,
data: *mut c_void
) -> c_int {
raw::wl_proxy_add_listener(
wl_buffer as *mut objects::wl_proxy,
listener as *mut extern fn(),
data
)
}
#[inline(always)]
pub unsafe fn wl_buffer_set_user_data(
wl_buffer: *mut objects::wl_buffer,
user_data: *mut c_void
) {
raw::wl_proxy_set_user_data(
wl_buffer as *mut objects::wl_proxy,
user_data
)
}
#[inline(always)]
pub unsafe fn wl_buffer_get_user_data(
wl_buffer: *mut objects::wl_buffer
) -> *mut c_void {
raw::wl_proxy_get_user_data(wl_buffer as *mut objects::wl_proxy)
}
#[inline(always)]
pub unsafe fn wl_buffer_destroy(
wl_buffer: *mut objects::wl_buffer
)
|
{
raw::wl_proxy_marshal(
wl_buffer as *mut objects::wl_proxy,
WL_BUFFER_DESTROY
);
raw::wl_proxy_destroy(wl_buffer as *mut objects::wl_proxy)
}
|
identifier_body
|
|
memory_usage.rs
|
use concurrency_manager::ConcurrencyManager;
use futures::executor::block_on;
use rand::prelude::*;
|
// This test is heavy so we shouldn't run it daily.
// Run it with the following command (recommending release mode) and see the printed stats:
//
// ```
// cargo test --package concurrency_manager --test memory_usage --features jemalloc --release -- test_memory_usage --exact --ignored --nocapture
// ```
#[test]
#[ignore]
fn test_memory_usage() {
// Make key as small as possible, so concurrent writes in the scheduler
// can be as many as possible. Then, we can maximize the amplication
// caused by the data structure.
const KEY_LEN: usize = 16;
// We allow 100MB pending writes in the scheduler.
const LOCK_COUNT: usize = 100 * 1024 * 1024 / KEY_LEN;
// Let system collect the memory to avoid drop time at the end of the test.
let cm = ManuallyDrop::new(ConcurrencyManager::new(1.into()));
const THR_NUM: usize = 8;
let mut ths = Vec::with_capacity(THR_NUM);
for _ in 0..THR_NUM {
let cm = cm.clone();
let th = thread::spawn(move || {
for _ in 0..(LOCK_COUNT / THR_NUM) {
let mut raw = vec![0; KEY_LEN];
thread_rng().fill_bytes(&mut raw[..]);
let key = Key::from_raw(&raw);
let lock = Lock::new(
LockType::Put,
raw,
10.into(),
1000,
None,
10.into(),
1,
20.into(),
);
// Key already exists
if cm.read_key_check(&key, |_| Err(())).is_err() {
continue;
}
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|l| {
*l = Some(lock);
});
forget(guard);
}
});
ths.push(th);
}
ths.into_iter().for_each(|th| th.join().unwrap());
println!("{:?}", tikv_alloc::fetch_stats());
}
|
use std::{mem::forget, mem::ManuallyDrop, thread};
use txn_types::{Key, Lock, LockType};
|
random_line_split
|
memory_usage.rs
|
use concurrency_manager::ConcurrencyManager;
use futures::executor::block_on;
use rand::prelude::*;
use std::{mem::forget, mem::ManuallyDrop, thread};
use txn_types::{Key, Lock, LockType};
// This test is heavy so we shouldn't run it daily.
// Run it with the following command (recommending release mode) and see the printed stats:
//
// ```
// cargo test --package concurrency_manager --test memory_usage --features jemalloc --release -- test_memory_usage --exact --ignored --nocapture
// ```
#[test]
#[ignore]
fn test_memory_usage() {
// Make key as small as possible, so concurrent writes in the scheduler
// can be as many as possible. Then, we can maximize the amplication
// caused by the data structure.
const KEY_LEN: usize = 16;
// We allow 100MB pending writes in the scheduler.
const LOCK_COUNT: usize = 100 * 1024 * 1024 / KEY_LEN;
// Let system collect the memory to avoid drop time at the end of the test.
let cm = ManuallyDrop::new(ConcurrencyManager::new(1.into()));
const THR_NUM: usize = 8;
let mut ths = Vec::with_capacity(THR_NUM);
for _ in 0..THR_NUM {
let cm = cm.clone();
let th = thread::spawn(move || {
for _ in 0..(LOCK_COUNT / THR_NUM) {
let mut raw = vec![0; KEY_LEN];
thread_rng().fill_bytes(&mut raw[..]);
let key = Key::from_raw(&raw);
let lock = Lock::new(
LockType::Put,
raw,
10.into(),
1000,
None,
10.into(),
1,
20.into(),
);
// Key already exists
if cm.read_key_check(&key, |_| Err(())).is_err()
|
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|l| {
*l = Some(lock);
});
forget(guard);
}
});
ths.push(th);
}
ths.into_iter().for_each(|th| th.join().unwrap());
println!("{:?}", tikv_alloc::fetch_stats());
}
|
{
continue;
}
|
conditional_block
|
memory_usage.rs
|
use concurrency_manager::ConcurrencyManager;
use futures::executor::block_on;
use rand::prelude::*;
use std::{mem::forget, mem::ManuallyDrop, thread};
use txn_types::{Key, Lock, LockType};
// This test is heavy so we shouldn't run it daily.
// Run it with the following command (recommending release mode) and see the printed stats:
//
// ```
// cargo test --package concurrency_manager --test memory_usage --features jemalloc --release -- test_memory_usage --exact --ignored --nocapture
// ```
#[test]
#[ignore]
fn test_memory_usage()
|
let lock = Lock::new(
LockType::Put,
raw,
10.into(),
1000,
None,
10.into(),
1,
20.into(),
);
// Key already exists
if cm.read_key_check(&key, |_| Err(())).is_err() {
continue;
}
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|l| {
*l = Some(lock);
});
forget(guard);
}
});
ths.push(th);
}
ths.into_iter().for_each(|th| th.join().unwrap());
println!("{:?}", tikv_alloc::fetch_stats());
}
|
{
// Make key as small as possible, so concurrent writes in the scheduler
// can be as many as possible. Then, we can maximize the amplication
// caused by the data structure.
const KEY_LEN: usize = 16;
// We allow 100MB pending writes in the scheduler.
const LOCK_COUNT: usize = 100 * 1024 * 1024 / KEY_LEN;
// Let system collect the memory to avoid drop time at the end of the test.
let cm = ManuallyDrop::new(ConcurrencyManager::new(1.into()));
const THR_NUM: usize = 8;
let mut ths = Vec::with_capacity(THR_NUM);
for _ in 0..THR_NUM {
let cm = cm.clone();
let th = thread::spawn(move || {
for _ in 0..(LOCK_COUNT / THR_NUM) {
let mut raw = vec![0; KEY_LEN];
thread_rng().fill_bytes(&mut raw[..]);
let key = Key::from_raw(&raw);
|
identifier_body
|
memory_usage.rs
|
use concurrency_manager::ConcurrencyManager;
use futures::executor::block_on;
use rand::prelude::*;
use std::{mem::forget, mem::ManuallyDrop, thread};
use txn_types::{Key, Lock, LockType};
// This test is heavy so we shouldn't run it daily.
// Run it with the following command (recommending release mode) and see the printed stats:
//
// ```
// cargo test --package concurrency_manager --test memory_usage --features jemalloc --release -- test_memory_usage --exact --ignored --nocapture
// ```
#[test]
#[ignore]
fn
|
() {
// Make key as small as possible, so concurrent writes in the scheduler
// can be as many as possible. Then, we can maximize the amplication
// caused by the data structure.
const KEY_LEN: usize = 16;
// We allow 100MB pending writes in the scheduler.
const LOCK_COUNT: usize = 100 * 1024 * 1024 / KEY_LEN;
// Let system collect the memory to avoid drop time at the end of the test.
let cm = ManuallyDrop::new(ConcurrencyManager::new(1.into()));
const THR_NUM: usize = 8;
let mut ths = Vec::with_capacity(THR_NUM);
for _ in 0..THR_NUM {
let cm = cm.clone();
let th = thread::spawn(move || {
for _ in 0..(LOCK_COUNT / THR_NUM) {
let mut raw = vec![0; KEY_LEN];
thread_rng().fill_bytes(&mut raw[..]);
let key = Key::from_raw(&raw);
let lock = Lock::new(
LockType::Put,
raw,
10.into(),
1000,
None,
10.into(),
1,
20.into(),
);
// Key already exists
if cm.read_key_check(&key, |_| Err(())).is_err() {
continue;
}
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|l| {
*l = Some(lock);
});
forget(guard);
}
});
ths.push(th);
}
ths.into_iter().for_each(|th| th.join().unwrap());
println!("{:?}", tikv_alloc::fetch_stats());
}
|
test_memory_usage
|
identifier_name
|
print.rs
|
// TODO: get num cols of terminal dynamically
// TODO: care about the given print options
use super::{Report, ReportKind, RemarkKind, Snippet};
use code::{FileMap, LineIdx, Span};
use term_painter::ToStyle;
use term_painter::Color::*;
use std::default::Default;
/// Options for printing on the terminal. By `default()` everything is enabled.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PrintOptions {
/// Use of unicode allowed?
pub unicode: bool,
/// Use of colors allowed?
pub color: bool,
/// Is line wrapping allowed?
pub line_wrap: bool,
}
impl Default for PrintOptions {
fn default() -> Self {
PrintOptions {
unicode: true,
color: true,
line_wrap: true,
}
}
}
/// Pretty prints a report
///
/// **Note**: right now, the `PrintOptions` are ignored.
pub fn print(rep: &Report, src: &FileMap, _: PrintOptions) {
trace!("Printing report: {:#?}", rep);
trace!("Printing with filemap: {:#?}", src);
// print header
let title = match rep.kind {
ReportKind::Error => White.bold().bg(Red).paint("ERROR"),
ReportKind::Warning => White.bold().bg(Yellow).paint("WARNING"),
};
let (sep, line) = if let Some(span) = rep.span {
(" : ", if span.is_dummy() {
"<dummy-span>".into()
} else {
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
if start.line!= end.line {
format!("{}-{}", start.line, end.line)
} else {
start.line.to_string()
}
})
} else {
("", "".into())
};
println!("+---- {} in {}{}{} ----+",
title,
src.filename(),
sep,
Magenta.bold().paint(line)
);
for rem in &rep.remarks {
trace!("Handling Remark {:?}", rem);
// print message
let (title, title_len) = match rem.kind {
RemarkKind::Error => (Red.paint("error:"), 6),
RemarkKind::Warning => (Yellow.paint("warning:"), 8),
RemarkKind::Note => (Green.paint("note:"), 5),
};
print!(" =====> {} ", title);
// spaces + big arrow + spaces + title + space
let indent = 6 + 6 + 2 + title_len + 1;
let block_width = 80 - indent;
let mut col = 0;
for word in rem.desc.split_whitespace() {
let word_len = word.chars().count();
if col + word_len >= block_width && col!= 0 {
println!("");
print!(" > {0:>1$} ", " ", title_len);
col = 0;
}
print!("{} ", White.bold().paint(word));
col += word_len + 1;
}
println!("");
// print code snippet
if let Some(span) = rem.snippet.span() {
print_snippet(src, span, &rem.snippet);
println!("");
}
}
println!("");
}
fn print_snippet(src: &FileMap, span: Span, snippet: &Snippet)
|
let line = line_orig.replace("\t", " ");
let num_tabs = line_orig[..start.col.0 as usize]
.chars()
.filter(|&c| c == '\t')
.count();
// adjust cols in case of replaced tabs
let startcol = start.col.0 as usize + 3*num_tabs;
let endcol = end.col.0 as usize + 3*num_tabs;
let (middle, underline_len, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
(&with[..], with.len(), Green)
},
Snippet::Orig(_) => {
(&line[startcol..endcol], endcol - startcol, Yellow)
},
_ => unreachable!(),
};
// print the line
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line),
Magenta.bold().paint("|"),
&line[..startcol],
color.paint(middle),
&line[endcol..],
);
// print the underline
color.with(|| {
println!(" {: <2$}{:^<3$}",
" ", "^",
startcol + 1,
underline_len,
);
});
}
// ----- Multiline -----
else {
let (lines, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
let mut lines = Vec::new();
if let Some(first_break) = with.find("\n") {
// we can unwrap, because we found it from the beginning
let last_break = with.rfind("\n").unwrap();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &with[..first_break], ""));
// lines in the middle
for line in with[..last_break].lines().skip(1) {
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &with[last_break + 1..], &line[endcol..]));
(lines, Green)
} else {
let first_line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
let last_line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
(vec![(
&first_line[..startcol],
&with[..],
&last_line[endcol..]
)], Green)
}
},
Snippet::Orig(_) => {
let mut lines = Vec::new();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &line[startcol..], ""));
// lines in the middle
for line_idx in (start.line.0 + 1)..end.line.0 {
let line = expect_line(src, LineIdx(line_idx));
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &line[..endcol], &line[endcol..]));
(lines, Yellow)
},
_ => unreachable!(),
};
for (i, &(pre, middle, post)) in lines.iter().enumerate() {
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line + LineIdx(i as u32)),
Magenta.bold().paint("|"),
pre,
color.paint(middle),
post,
);
}
}
}
fn expect_line(src: &FileMap, line: LineIdx) -> &str {
src.get_line(line).expect("`Loc` from FileMap should return a valid line")
}
|
{
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
// ----- Dummyspan -----
if span.is_dummy() {
println!(" {} {} ! no snippet due to <dummy-span>, this is a bug !",
Magenta.bold().paint("?"),
Magenta.bold().paint("|"),
);
}
// ----- Singleline -----
else if start.line == end.line {
let line_orig = expect_line(src, start.line);
trace!("Printing single line span. Orig line: {:?}", line_orig);
// replace tabs
|
identifier_body
|
print.rs
|
// TODO: get num cols of terminal dynamically
// TODO: care about the given print options
use super::{Report, ReportKind, RemarkKind, Snippet};
use code::{FileMap, LineIdx, Span};
use term_painter::ToStyle;
use term_painter::Color::*;
use std::default::Default;
/// Options for printing on the terminal. By `default()` everything is enabled.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PrintOptions {
/// Use of unicode allowed?
pub unicode: bool,
/// Use of colors allowed?
pub color: bool,
/// Is line wrapping allowed?
pub line_wrap: bool,
}
impl Default for PrintOptions {
fn default() -> Self {
PrintOptions {
unicode: true,
color: true,
line_wrap: true,
}
}
}
/// Pretty prints a report
///
/// **Note**: right now, the `PrintOptions` are ignored.
pub fn
|
(rep: &Report, src: &FileMap, _: PrintOptions) {
trace!("Printing report: {:#?}", rep);
trace!("Printing with filemap: {:#?}", src);
// print header
let title = match rep.kind {
ReportKind::Error => White.bold().bg(Red).paint("ERROR"),
ReportKind::Warning => White.bold().bg(Yellow).paint("WARNING"),
};
let (sep, line) = if let Some(span) = rep.span {
(" : ", if span.is_dummy() {
"<dummy-span>".into()
} else {
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
if start.line!= end.line {
format!("{}-{}", start.line, end.line)
} else {
start.line.to_string()
}
})
} else {
("", "".into())
};
println!("+---- {} in {}{}{} ----+",
title,
src.filename(),
sep,
Magenta.bold().paint(line)
);
for rem in &rep.remarks {
trace!("Handling Remark {:?}", rem);
// print message
let (title, title_len) = match rem.kind {
RemarkKind::Error => (Red.paint("error:"), 6),
RemarkKind::Warning => (Yellow.paint("warning:"), 8),
RemarkKind::Note => (Green.paint("note:"), 5),
};
print!(" =====> {} ", title);
// spaces + big arrow + spaces + title + space
let indent = 6 + 6 + 2 + title_len + 1;
let block_width = 80 - indent;
let mut col = 0;
for word in rem.desc.split_whitespace() {
let word_len = word.chars().count();
if col + word_len >= block_width && col!= 0 {
println!("");
print!(" > {0:>1$} ", " ", title_len);
col = 0;
}
print!("{} ", White.bold().paint(word));
col += word_len + 1;
}
println!("");
// print code snippet
if let Some(span) = rem.snippet.span() {
print_snippet(src, span, &rem.snippet);
println!("");
}
}
println!("");
}
fn print_snippet(src: &FileMap, span: Span, snippet: &Snippet) {
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
// ----- Dummyspan -----
if span.is_dummy() {
println!(" {} {}! no snippet due to <dummy-span>, this is a bug!",
Magenta.bold().paint("?"),
Magenta.bold().paint("|"),
);
}
// ----- Singleline -----
else if start.line == end.line {
let line_orig = expect_line(src, start.line);
trace!("Printing single line span. Orig line: {:?}", line_orig);
// replace tabs
let line = line_orig.replace("\t", " ");
let num_tabs = line_orig[..start.col.0 as usize]
.chars()
.filter(|&c| c == '\t')
.count();
// adjust cols in case of replaced tabs
let startcol = start.col.0 as usize + 3*num_tabs;
let endcol = end.col.0 as usize + 3*num_tabs;
let (middle, underline_len, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
(&with[..], with.len(), Green)
},
Snippet::Orig(_) => {
(&line[startcol..endcol], endcol - startcol, Yellow)
},
_ => unreachable!(),
};
// print the line
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line),
Magenta.bold().paint("|"),
&line[..startcol],
color.paint(middle),
&line[endcol..],
);
// print the underline
color.with(|| {
println!(" {: <2$}{:^<3$}",
" ", "^",
startcol + 1,
underline_len,
);
});
}
// ----- Multiline -----
else {
let (lines, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
let mut lines = Vec::new();
if let Some(first_break) = with.find("\n") {
// we can unwrap, because we found it from the beginning
let last_break = with.rfind("\n").unwrap();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &with[..first_break], ""));
// lines in the middle
for line in with[..last_break].lines().skip(1) {
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &with[last_break + 1..], &line[endcol..]));
(lines, Green)
} else {
let first_line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
let last_line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
(vec![(
&first_line[..startcol],
&with[..],
&last_line[endcol..]
)], Green)
}
},
Snippet::Orig(_) => {
let mut lines = Vec::new();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &line[startcol..], ""));
// lines in the middle
for line_idx in (start.line.0 + 1)..end.line.0 {
let line = expect_line(src, LineIdx(line_idx));
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &line[..endcol], &line[endcol..]));
(lines, Yellow)
},
_ => unreachable!(),
};
for (i, &(pre, middle, post)) in lines.iter().enumerate() {
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line + LineIdx(i as u32)),
Magenta.bold().paint("|"),
pre,
color.paint(middle),
post,
);
}
}
}
fn expect_line(src: &FileMap, line: LineIdx) -> &str {
src.get_line(line).expect("`Loc` from FileMap should return a valid line")
}
|
print
|
identifier_name
|
print.rs
|
// TODO: get num cols of terminal dynamically
// TODO: care about the given print options
use super::{Report, ReportKind, RemarkKind, Snippet};
use code::{FileMap, LineIdx, Span};
use term_painter::ToStyle;
use term_painter::Color::*;
use std::default::Default;
/// Options for printing on the terminal. By `default()` everything is enabled.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PrintOptions {
/// Use of unicode allowed?
pub unicode: bool,
/// Use of colors allowed?
pub color: bool,
/// Is line wrapping allowed?
pub line_wrap: bool,
}
impl Default for PrintOptions {
fn default() -> Self {
PrintOptions {
unicode: true,
color: true,
line_wrap: true,
}
}
}
/// Pretty prints a report
///
/// **Note**: right now, the `PrintOptions` are ignored.
pub fn print(rep: &Report, src: &FileMap, _: PrintOptions) {
trace!("Printing report: {:#?}", rep);
trace!("Printing with filemap: {:#?}", src);
// print header
let title = match rep.kind {
ReportKind::Error => White.bold().bg(Red).paint("ERROR"),
ReportKind::Warning => White.bold().bg(Yellow).paint("WARNING"),
};
let (sep, line) = if let Some(span) = rep.span {
(" : ", if span.is_dummy() {
"<dummy-span>".into()
} else {
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
if start.line!= end.line {
format!("{}-{}", start.line, end.line)
} else {
start.line.to_string()
}
})
} else {
("", "".into())
};
println!("+---- {} in {}{}{} ----+",
title,
src.filename(),
sep,
Magenta.bold().paint(line)
);
for rem in &rep.remarks {
trace!("Handling Remark {:?}", rem);
// print message
let (title, title_len) = match rem.kind {
RemarkKind::Error => (Red.paint("error:"), 6),
RemarkKind::Warning => (Yellow.paint("warning:"), 8),
RemarkKind::Note => (Green.paint("note:"), 5),
};
print!(" =====> {} ", title);
// spaces + big arrow + spaces + title + space
let indent = 6 + 6 + 2 + title_len + 1;
let block_width = 80 - indent;
let mut col = 0;
for word in rem.desc.split_whitespace() {
let word_len = word.chars().count();
if col + word_len >= block_width && col!= 0 {
println!("");
print!(" > {0:>1$} ", " ", title_len);
col = 0;
}
print!("{} ", White.bold().paint(word));
col += word_len + 1;
}
println!("");
// print code snippet
if let Some(span) = rem.snippet.span() {
print_snippet(src, span, &rem.snippet);
println!("");
}
}
println!("");
}
fn print_snippet(src: &FileMap, span: Span, snippet: &Snippet) {
let start = src.get_loc(span.lo);
let end = src.get_loc(span.hi);
trace!("Span is from {:?} to {:?}", start, end);
// ----- Dummyspan -----
if span.is_dummy() {
println!(" {} {}! no snippet due to <dummy-span>, this is a bug!",
Magenta.bold().paint("?"),
Magenta.bold().paint("|"),
);
}
// ----- Singleline -----
else if start.line == end.line {
let line_orig = expect_line(src, start.line);
trace!("Printing single line span. Orig line: {:?}", line_orig);
// replace tabs
let line = line_orig.replace("\t", " ");
let num_tabs = line_orig[..start.col.0 as usize]
.chars()
.filter(|&c| c == '\t')
.count();
// adjust cols in case of replaced tabs
let startcol = start.col.0 as usize + 3*num_tabs;
let endcol = end.col.0 as usize + 3*num_tabs;
let (middle, underline_len, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
(&with[..], with.len(), Green)
},
Snippet::Orig(_) => {
(&line[startcol..endcol], endcol - startcol, Yellow)
},
_ => unreachable!(),
};
// print the line
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line),
Magenta.bold().paint("|"),
&line[..startcol],
color.paint(middle),
&line[endcol..],
);
// print the underline
color.with(|| {
|
);
});
}
// ----- Multiline -----
else {
let (lines, color) = match *snippet {
Snippet::Replace { ref with,.. } => {
let mut lines = Vec::new();
if let Some(first_break) = with.find("\n") {
// we can unwrap, because we found it from the beginning
let last_break = with.rfind("\n").unwrap();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &with[..first_break], ""));
// lines in the middle
for line in with[..last_break].lines().skip(1) {
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &with[last_break + 1..], &line[endcol..]));
(lines, Green)
} else {
let first_line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
let last_line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
(vec![(
&first_line[..startcol],
&with[..],
&last_line[endcol..]
)], Green)
}
},
Snippet::Orig(_) => {
let mut lines = Vec::new();
// first line
let line = expect_line(src, start.line);
let startcol = start.col.0 as usize;
lines.push((&line[..startcol], &line[startcol..], ""));
// lines in the middle
for line_idx in (start.line.0 + 1)..end.line.0 {
let line = expect_line(src, LineIdx(line_idx));
lines.push(("", line, ""));
}
// last line
let line = expect_line(src, end.line);
let endcol = end.col.0 as usize;
lines.push(("", &line[..endcol], &line[endcol..]));
(lines, Yellow)
},
_ => unreachable!(),
};
for (i, &(pre, middle, post)) in lines.iter().enumerate() {
println!("{:>#4} {} {}{}{}",
Magenta.bold().paint(start.line + LineIdx(i as u32)),
Magenta.bold().paint("|"),
pre,
color.paint(middle),
post,
);
}
}
}
fn expect_line(src: &FileMap, line: LineIdx) -> &str {
src.get_line(line).expect("`Loc` from FileMap should return a valid line")
}
|
println!(" {: <2$}{:^<3$}",
" ", "^",
startcol + 1,
underline_len,
|
random_line_split
|
magic_square.rs
|
extern crate magic_square;
use magic_square::*;
#[test]
pub fn magic_square_contains_the_postage_stamp_values() {
let mut values = create_magic_square().values;
values.sort_by(|a, b| a.partial_cmp(b).unwrap());
assert_eq!(POSTAGE_STAMP_VALUES, values);
}
|
pub fn rows_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_rows(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn columns_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_columns(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn diagonals_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_diagonals(values).iter() {
assert_eq!(magic_number, sum);
}
}
fn magic_number() -> f32 {
sum_rows(create_magic_square().values)[0]
}
fn sum_rows(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
fn sum_columns(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
fn sum_diagonals(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
|
#[test]
|
random_line_split
|
magic_square.rs
|
extern crate magic_square;
use magic_square::*;
#[test]
pub fn magic_square_contains_the_postage_stamp_values() {
let mut values = create_magic_square().values;
values.sort_by(|a, b| a.partial_cmp(b).unwrap());
assert_eq!(POSTAGE_STAMP_VALUES, values);
}
#[test]
pub fn rows_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_rows(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn columns_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_columns(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn diagonals_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_diagonals(values).iter() {
assert_eq!(magic_number, sum);
}
}
fn magic_number() -> f32 {
sum_rows(create_magic_square().values)[0]
}
fn sum_rows(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
fn
|
(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
fn sum_diagonals(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
|
sum_columns
|
identifier_name
|
magic_square.rs
|
extern crate magic_square;
use magic_square::*;
#[test]
pub fn magic_square_contains_the_postage_stamp_values() {
let mut values = create_magic_square().values;
values.sort_by(|a, b| a.partial_cmp(b).unwrap());
assert_eq!(POSTAGE_STAMP_VALUES, values);
}
#[test]
pub fn rows_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_rows(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn columns_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_columns(values).iter() {
assert_eq!(magic_number, sum);
}
}
#[test]
pub fn diagonals_sum_to_the_same_value() {
let values = create_magic_square().values;
let magic_number = magic_number();
for &sum in sum_diagonals(values).iter() {
assert_eq!(magic_number, sum);
}
}
fn magic_number() -> f32 {
sum_rows(create_magic_square().values)[0]
}
fn sum_rows(values: [f32; 9]) -> [f32; 3]
|
fn sum_columns(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
fn sum_diagonals(values: [f32; 9]) -> [f32; 3] {
panic!("For you to do");
}
|
{
panic!("For you to do");
}
|
identifier_body
|
transport.rs
|
// Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for implementing transport layer protocols
//!
//! The transport module provides the ability to send and receive packets at the transport layer
//! using IPv4 or IPv6. It also enables layer 3 networking for specific transport protocols, using
//! IPv4 only.
//!
//! Note that this is limited by operating system support - for example, on OS X and FreeBSD, it is
//! impossible to implement protocols which are already implemented in the kernel such as TCP and
//! UDP.
#![macro_use]
extern crate libc;
use self::TransportProtocol::{Ipv4, Ipv6};
use self::TransportChannelType::{Layer3, Layer4};
use std::io;
use std::io::Error;
use std::iter::repeat;
use std::net;
use std::mem;
use std::sync::Arc;
use packet::Packet;
use packet::ip::IpNextHeaderProtocol;
use packet::ipv4::Ipv4Packet;
use packet::udp::UdpPacket;
use internal;
/// Represents a transport layer protocol
#[derive(Clone, Copy)]
pub enum TransportProtocol {
/// Represents a transport protocol built on top of IPv4
Ipv4(IpNextHeaderProtocol),
/// Represents a transport protocol built on top of IPv6
Ipv6(IpNextHeaderProtocol)
}
/// Type of transport channel to present
#[derive(Clone, Copy)]
pub enum TransportChannelType {
/// The application will send and receive transport layer packets
Layer4(TransportProtocol),
/// The application will send and receive IPv4 packets, with the specified transport protocol
Layer3(IpNextHeaderProtocol)
}
/// Structure used for sending at the transport layer. Should be created with transport_channel()
pub struct TransportSender {
socket: Arc<internal::FileDesc>,
_channel_type: TransportChannelType
}
/// Structure used for sending at the transport layer. Should be created with transport_channel()
pub struct TransportReceiver {
socket: Arc<internal::FileDesc>,
buffer: Vec<u8>,
channel_type: TransportChannelType
}
/// Create a new (TransportSender, TransportReceiver) pair
///
/// This allows for sending and receiving packets at the transport layer. The buffer size should be
/// large enough to handle the largest packet you wish to receive.
///
/// The channel type specifies what layer to send and receive packets at, and the transport
/// protocol you wish to implement. For example, `Layer4(Ipv4(IpNextHeaderProtocols::Udp))` would
/// allow sending and receiving UDP packets using IPv4; whereas Layer3(IpNextHeaderProtocols::Udp)
/// would include the IPv4 Header in received values, and require manual construction of an IP
/// header when sending.
pub fn transport_channel(buffer_size: usize, channel_type: TransportChannelType)
-> io::Result<(TransportSender, TransportReceiver)> {
let socket = unsafe {
match channel_type {
Layer4(Ipv4(IpNextHeaderProtocol(proto))) | Layer3(IpNextHeaderProtocol(proto))
=> libc::socket(libc::AF_INET, libc::SOCK_RAW, proto as libc::c_int),
Layer4(Ipv6(IpNextHeaderProtocol(proto)))
=> libc::socket(libc::AF_INET6, libc::SOCK_RAW, proto as libc::c_int),
}
};
if socket!= -1 {
if match channel_type { Layer3(_) | Layer4(Ipv4(_)) => true, _ => false } {
let hincl: libc::c_int = match channel_type { Layer4(..) => 0, _ => 1 };
let res = unsafe {
libc::setsockopt(socket,
libc::IPPROTO_IP,
libc::IP_HDRINCL,
(&hincl as *const libc::c_int) as *const libc::c_void,
mem::size_of::<libc::c_int>() as libc::socklen_t)
};
if res == -1 {
let err = Error::last_os_error();
unsafe { internal::close(socket); }
return Err(err);
}
}
let sock = Arc::new(internal::FileDesc { fd: socket });
let sender = TransportSender {
socket: sock.clone(),
_channel_type: channel_type,
};
let receiver = TransportReceiver {
socket: sock,
buffer: repeat(0u8).take(buffer_size).collect(),
channel_type: channel_type,
};
Ok((sender, receiver))
} else {
Err(Error::last_os_error())
}
}
impl TransportSender {
fn send<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
let mut caddr = unsafe { mem::zeroed() };
let slen = internal::addr_to_sockaddr(net::SocketAddr::new(dst, 0), &mut caddr);
let caddr_ptr = (&caddr as *const libc::sockaddr_storage) as *const libc::sockaddr;
internal::send_to(self.socket.fd, packet.packet(), caddr_ptr, slen)
}
/// Send a packet to the provided desination
#[inline]
pub fn send_to<T : Packet>(&mut self, packet: T, destination: net::IpAddr) -> io::Result<usize> {
self.send_to_impl(packet, destination)
}
#[cfg(all(not(target_os = "freebsd"), not(target_os = "macos")))]
fn send_to_impl<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
self.send(packet, dst)
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn
|
<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
use packet::ipv4::MutableIpv4Packet;
// FreeBSD and OS X expect total length and fragment offset fields of IPv4 packets to be in
// host byte order rather than network byte order (man 4 ip/Raw IP Sockets)
if match self._channel_type { Layer3(..) => true, _ => false } {
let mut mut_slice: Vec<u8> = repeat(0u8).take(packet.packet().len()).collect();
mut_slice.as_mut_slice().clone_from_slice(packet.packet());
let mut new_packet = MutableIpv4Packet::new(&mut mut_slice[..]).unwrap();
let length = new_packet.get_total_length().to_be();
new_packet.set_total_length(length);
let offset = new_packet.get_fragment_offset().to_be();
new_packet.set_fragment_offset(offset);
return self.send(new_packet, dst);
}
self.send(packet, dst)
}
}
/// Create an iterator for some packet type.
///
/// Usage:
/// ```
/// transport_channel_iterator!(Ipv4Packet, // Type to iterate over
/// Ipv4TransportChannelIterator, // Name for iterator struct
/// ipv4_packet_iter) // Name of function to create iterator
/// ```
#[macro_export]
macro_rules! transport_channel_iterator {
($ty:ident, $iter:ident, $func:ident) => (
/// An iterator over packets of type $ty
pub struct $iter<'a> {
tr: &'a mut TransportReceiver
}
/// Return a packet iterator with packets of type $ty for some transport receiver
pub fn $func<'a>(tr: &'a mut TransportReceiver) -> $iter<'a> {
$iter {
tr: tr
}
}
impl<'a> $iter<'a> {
/// Get the next ($ty, IpAddr) pair for the given channel
pub fn next<'c>(&'c mut self) -> io::Result<($ty, net::IpAddr)> {
let mut caddr: libc::sockaddr_storage = unsafe { mem::zeroed() };
let res = internal::recv_from(self.tr.socket.fd, &mut self.tr.buffer[..], &mut caddr);
let offset = match self.tr.channel_type {
Layer4(Ipv4(_)) => {
let ip_header = Ipv4Packet::new(&self.tr.buffer[..]).unwrap();
ip_header.get_header_length() as usize * 4usize
},
Layer3(_) => {
fixup_packet(&mut self.tr.buffer[..]);
0
},
_ => 0
};
return match res {
Ok(len) => {
let packet = $ty::new(&self.tr.buffer[offset..len]).unwrap();
let addr = internal::sockaddr_to_addr(
&caddr,
mem::size_of::<libc::sockaddr_storage>()
);
Ok((packet, addr.unwrap().ip()))
},
Err(e) => Err(e),
};
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn fixup_packet(buffer: &mut [u8]) {
use packet::ipv4::MutableIpv4Packet;
let buflen = buffer.len();
let mut new_packet = MutableIpv4Packet::new(buffer).unwrap();
let length = u16::from_be(new_packet.get_total_length());
new_packet.set_total_length(length);
// OS X does this awesome thing where it removes the header length
// from the total length sometimes.
let length = new_packet.get_total_length() as usize +
(new_packet.get_header_length() as usize * 4usize);
if length == buflen {
new_packet.set_total_length(length as u16)
}
let offset = u16::from_be(new_packet.get_fragment_offset());
new_packet.set_fragment_offset(offset);
}
#[cfg(all(not(target_os = "freebsd"), not(target_os = "macos")))]
fn fixup_packet(_buffer: &mut [u8]) {}
}
}
)
}
transport_channel_iterator!(Ipv4Packet,
Ipv4TransportChannelIterator,
ipv4_packet_iter);
transport_channel_iterator!(UdpPacket,
UdpTransportChannelIterator,
udp_packet_iter);
|
send_to_impl
|
identifier_name
|
transport.rs
|
// Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for implementing transport layer protocols
//!
//! The transport module provides the ability to send and receive packets at the transport layer
//! using IPv4 or IPv6. It also enables layer 3 networking for specific transport protocols, using
//! IPv4 only.
//!
//! Note that this is limited by operating system support - for example, on OS X and FreeBSD, it is
//! impossible to implement protocols which are already implemented in the kernel such as TCP and
//! UDP.
#![macro_use]
extern crate libc;
use self::TransportProtocol::{Ipv4, Ipv6};
use self::TransportChannelType::{Layer3, Layer4};
use std::io;
use std::io::Error;
use std::iter::repeat;
use std::net;
use std::mem;
use std::sync::Arc;
use packet::Packet;
use packet::ip::IpNextHeaderProtocol;
use packet::ipv4::Ipv4Packet;
use packet::udp::UdpPacket;
use internal;
/// Represents a transport layer protocol
#[derive(Clone, Copy)]
pub enum TransportProtocol {
/// Represents a transport protocol built on top of IPv4
Ipv4(IpNextHeaderProtocol),
/// Represents a transport protocol built on top of IPv6
Ipv6(IpNextHeaderProtocol)
}
/// Type of transport channel to present
#[derive(Clone, Copy)]
pub enum TransportChannelType {
/// The application will send and receive transport layer packets
Layer4(TransportProtocol),
/// The application will send and receive IPv4 packets, with the specified transport protocol
Layer3(IpNextHeaderProtocol)
}
/// Structure used for sending at the transport layer. Should be created with transport_channel()
pub struct TransportSender {
socket: Arc<internal::FileDesc>,
_channel_type: TransportChannelType
}
/// Structure used for sending at the transport layer. Should be created with transport_channel()
pub struct TransportReceiver {
socket: Arc<internal::FileDesc>,
buffer: Vec<u8>,
channel_type: TransportChannelType
}
/// Create a new (TransportSender, TransportReceiver) pair
///
/// This allows for sending and receiving packets at the transport layer. The buffer size should be
/// large enough to handle the largest packet you wish to receive.
///
/// The channel type specifies what layer to send and receive packets at, and the transport
/// protocol you wish to implement. For example, `Layer4(Ipv4(IpNextHeaderProtocols::Udp))` would
/// allow sending and receiving UDP packets using IPv4; whereas Layer3(IpNextHeaderProtocols::Udp)
/// would include the IPv4 Header in received values, and require manual construction of an IP
/// header when sending.
pub fn transport_channel(buffer_size: usize, channel_type: TransportChannelType)
-> io::Result<(TransportSender, TransportReceiver)> {
let socket = unsafe {
match channel_type {
Layer4(Ipv4(IpNextHeaderProtocol(proto))) | Layer3(IpNextHeaderProtocol(proto))
=> libc::socket(libc::AF_INET, libc::SOCK_RAW, proto as libc::c_int),
Layer4(Ipv6(IpNextHeaderProtocol(proto)))
=> libc::socket(libc::AF_INET6, libc::SOCK_RAW, proto as libc::c_int),
}
};
if socket!= -1 {
if match channel_type { Layer3(_) | Layer4(Ipv4(_)) => true, _ => false } {
let hincl: libc::c_int = match channel_type { Layer4(..) => 0, _ => 1 };
let res = unsafe {
libc::setsockopt(socket,
libc::IPPROTO_IP,
libc::IP_HDRINCL,
(&hincl as *const libc::c_int) as *const libc::c_void,
mem::size_of::<libc::c_int>() as libc::socklen_t)
};
if res == -1 {
let err = Error::last_os_error();
unsafe { internal::close(socket); }
return Err(err);
}
}
let sock = Arc::new(internal::FileDesc { fd: socket });
let sender = TransportSender {
socket: sock.clone(),
_channel_type: channel_type,
};
let receiver = TransportReceiver {
socket: sock,
buffer: repeat(0u8).take(buffer_size).collect(),
channel_type: channel_type,
};
Ok((sender, receiver))
} else {
Err(Error::last_os_error())
}
}
impl TransportSender {
fn send<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
let mut caddr = unsafe { mem::zeroed() };
let slen = internal::addr_to_sockaddr(net::SocketAddr::new(dst, 0), &mut caddr);
let caddr_ptr = (&caddr as *const libc::sockaddr_storage) as *const libc::sockaddr;
internal::send_to(self.socket.fd, packet.packet(), caddr_ptr, slen)
}
/// Send a packet to the provided desination
#[inline]
pub fn send_to<T : Packet>(&mut self, packet: T, destination: net::IpAddr) -> io::Result<usize> {
self.send_to_impl(packet, destination)
}
#[cfg(all(not(target_os = "freebsd"), not(target_os = "macos")))]
fn send_to_impl<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
self.send(packet, dst)
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn send_to_impl<T : Packet>(&mut self, packet: T, dst: net::IpAddr) -> io::Result<usize> {
use packet::ipv4::MutableIpv4Packet;
// FreeBSD and OS X expect total length and fragment offset fields of IPv4 packets to be in
// host byte order rather than network byte order (man 4 ip/Raw IP Sockets)
if match self._channel_type { Layer3(..) => true, _ => false } {
let mut mut_slice: Vec<u8> = repeat(0u8).take(packet.packet().len()).collect();
mut_slice.as_mut_slice().clone_from_slice(packet.packet());
let mut new_packet = MutableIpv4Packet::new(&mut mut_slice[..]).unwrap();
let length = new_packet.get_total_length().to_be();
new_packet.set_total_length(length);
let offset = new_packet.get_fragment_offset().to_be();
new_packet.set_fragment_offset(offset);
return self.send(new_packet, dst);
}
self.send(packet, dst)
}
}
/// Create an iterator for some packet type.
///
/// Usage:
/// ```
/// transport_channel_iterator!(Ipv4Packet, // Type to iterate over
/// Ipv4TransportChannelIterator, // Name for iterator struct
|
macro_rules! transport_channel_iterator {
($ty:ident, $iter:ident, $func:ident) => (
/// An iterator over packets of type $ty
pub struct $iter<'a> {
tr: &'a mut TransportReceiver
}
/// Return a packet iterator with packets of type $ty for some transport receiver
pub fn $func<'a>(tr: &'a mut TransportReceiver) -> $iter<'a> {
$iter {
tr: tr
}
}
impl<'a> $iter<'a> {
/// Get the next ($ty, IpAddr) pair for the given channel
pub fn next<'c>(&'c mut self) -> io::Result<($ty, net::IpAddr)> {
let mut caddr: libc::sockaddr_storage = unsafe { mem::zeroed() };
let res = internal::recv_from(self.tr.socket.fd, &mut self.tr.buffer[..], &mut caddr);
let offset = match self.tr.channel_type {
Layer4(Ipv4(_)) => {
let ip_header = Ipv4Packet::new(&self.tr.buffer[..]).unwrap();
ip_header.get_header_length() as usize * 4usize
},
Layer3(_) => {
fixup_packet(&mut self.tr.buffer[..]);
0
},
_ => 0
};
return match res {
Ok(len) => {
let packet = $ty::new(&self.tr.buffer[offset..len]).unwrap();
let addr = internal::sockaddr_to_addr(
&caddr,
mem::size_of::<libc::sockaddr_storage>()
);
Ok((packet, addr.unwrap().ip()))
},
Err(e) => Err(e),
};
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn fixup_packet(buffer: &mut [u8]) {
use packet::ipv4::MutableIpv4Packet;
let buflen = buffer.len();
let mut new_packet = MutableIpv4Packet::new(buffer).unwrap();
let length = u16::from_be(new_packet.get_total_length());
new_packet.set_total_length(length);
// OS X does this awesome thing where it removes the header length
// from the total length sometimes.
let length = new_packet.get_total_length() as usize +
(new_packet.get_header_length() as usize * 4usize);
if length == buflen {
new_packet.set_total_length(length as u16)
}
let offset = u16::from_be(new_packet.get_fragment_offset());
new_packet.set_fragment_offset(offset);
}
#[cfg(all(not(target_os = "freebsd"), not(target_os = "macos")))]
fn fixup_packet(_buffer: &mut [u8]) {}
}
}
)
}
transport_channel_iterator!(Ipv4Packet,
Ipv4TransportChannelIterator,
ipv4_packet_iter);
transport_channel_iterator!(UdpPacket,
UdpTransportChannelIterator,
udp_packet_iter);
|
/// ipv4_packet_iter) // Name of function to create iterator
/// ```
#[macro_export]
|
random_line_split
|
crypto.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore crypto.
use numbers::*;
use bytes::*;
use secp256k1::{Secp256k1, key};
use rand::os::OsRng;
use sha3::Hashable;
/// Secret key for secp256k1 EC operations. 256 bit generic "hash" data.
pub type Secret = H256;
/// Public key for secp256k1 EC operations. 512 bit generic "hash" data.
pub type Public = H512;
/// Signature for secp256k1 EC operations; encodes two 256-bit curve points
/// and a third sign bit. 520 bit generic "hash" data.
pub type Signature = H520;
lazy_static! {
static ref SECP256K1: Secp256k1 = Secp256k1::new();
}
impl Signature {
/// Create a new signature from the R, S and V componenets.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature {
use std::ptr;
let mut ret: Signature = Signature::new();
unsafe {
let retslice: &mut [u8] = &mut ret;
ptr::copy(r.as_ptr(), retslice.as_mut_ptr(), 32);
ptr::copy(s.as_ptr(), retslice.as_mut_ptr().offset(32), 32);
}
ret[64] = v;
ret
}
/// Convert transaction to R, S and V components.
pub fn to_rsv(&self) -> (U256, U256, u8) {
(U256::from(&self.as_slice()[0..32]), U256::from(&self.as_slice()[32..64]), self[64])
}
}
#[derive(Debug)]
/// Crypto error
pub enum CryptoError {
/// Invalid secret key
InvalidSecret,
/// Invalid public key
InvalidPublic,
/// Invalid EC signature
InvalidSignature,
/// Invalid AES message
InvalidMessage,
/// IO Error
Io(::std::io::Error),
}
impl From<::secp256k1::Error> for CryptoError {
fn from(e: ::secp256k1::Error) -> CryptoError {
match e {
::secp256k1::Error::InvalidMessage => CryptoError::InvalidMessage,
::secp256k1::Error::InvalidPublicKey => CryptoError::InvalidPublic,
::secp256k1::Error::InvalidSecretKey => CryptoError::InvalidSecret,
_ => CryptoError::InvalidSignature,
}
}
}
impl From<::std::io::Error> for CryptoError {
fn from(err: ::std::io::Error) -> CryptoError {
CryptoError::Io(err)
}
}
#[derive(Debug, PartialEq, Eq)]
/// secp256k1 Key pair
///
/// Use `create()` to create a new random key pair.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::crypto::*;
/// use ethcore_util::hash::*;
/// fn main() {
/// let pair = KeyPair::create().unwrap();
/// let message = H256::random();
/// let signature = ec::sign(pair.secret(), &message).unwrap();
///
/// assert!(ec::verify(pair.public(), &signature, &message).unwrap());
/// assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
/// }
/// ```
pub struct KeyPair {
secret: Secret,
public: Public,
}
impl KeyPair {
/// Create a pair from secret key
pub fn from_secret(secret: Secret) -> Result<KeyPair, CryptoError> {
let context = &SECP256K1;
let s: key::SecretKey = try!(key::SecretKey::from_slice(context, &secret));
let pub_key = try!(key::PublicKey::from_secret_key(context, &s));
let serialized = pub_key.serialize_vec(context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
Ok(KeyPair {
secret: secret,
public: p,
})
}
/// Create a new random key pair
pub fn create() -> Result<KeyPair, CryptoError> {
let context = &SECP256K1;
let mut rng = try!(OsRng::new());
let (sec, publ) = try!(context.generate_keypair(&mut rng));
let serialized = publ.serialize_vec(context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
let s: Secret = unsafe { ::std::mem::transmute(sec) };
Ok(KeyPair {
secret: s,
public: p,
})
}
/// Returns public key
pub fn public(&self) -> &Public {
&self.public
}
/// Returns private key
pub fn secret(&self) -> &Secret {
&self.secret
}
/// Returns address.
pub fn address(&self) -> Address {
Address::from(self.public.sha3())
}
/// Sign a message with our secret key.
pub fn sign(&self, message: &H256) -> Result<Signature, CryptoError> {
ec::sign(&self.secret, message)
}
}
/// EC functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ec {
use numbers::*;
use standard::*;
use crypto::*;
use crypto;
/// Recovers Public key from signed message hash.
pub fn recover(signature: &Signature, message: &H256) -> Result<Public, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let publ = try!(context.recover(&try!(Message::from_slice(&message)), &rsig));
let serialized = publ.serialize_vec(context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
// TODO: check if it's the zero key and fail if so.
Ok(p)
}
/// Returns siganture of message hash.
pub fn sign(secret: &Secret, message: &H256) -> Result<Signature, CryptoError> {
// TODO: allow creation of only low-s signatures.
use secp256k1::*;
let context = &crypto::SECP256K1;
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let s = try!(context.sign_recoverable(&try!(Message::from_slice(&message)), sec));
let (rec_id, data) = s.serialize_compact(context);
let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() };
signature.clone_from_slice(&data);
signature[64] = rec_id.to_i32() as u8;
let (_, s, v) = signature.to_rsv();
let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap();
if!is_low_s(&s) {
signature = super::Signature::from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1);
}
Ok(signature)
}
/// Verify signature.
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let sig = rsig.to_standard(context);
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(context, &pdata));
match context.verify(&try!(Message::from_slice(&message)), &sig, &publ) {
Ok(_) => Ok(true),
Err(Error::IncorrectSignature) => Ok(false),
Err(x) => Err(CryptoError::from(x)),
}
}
/// Check if this is a "low" signature.
pub fn is_low(sig: &Signature) -> bool {
H256::from_slice(&sig[32..64]) <= h256_from_hex("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0")
}
/// Check if this is a "low" signature.
pub fn is_low_s(s: &U256) -> bool {
s <= &U256::from_str("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0").unwrap()
}
/// Check if each component of the signature is in range.
pub fn is_valid(sig: &Signature) -> bool
|
}
/// ECDH functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecdh {
use crypto::*;
use crypto;
/// Agree on a shared secret
pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(context, &pdata));
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
let s: Secret = unsafe { ::std::mem::transmute(shared) };
Ok(s)
}
}
/// ECIES function
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecies {
use hash::*;
use bytes::*;
use crypto::*;
/// Encrypt a message with a public key
pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result<Bytes, CryptoError> {
use rcrypto::digest::Digest;
use rcrypto::sha2::Sha256;
use rcrypto::hmac::Hmac;
use rcrypto::mac::Mac;
let r = try!(KeyPair::create());
let z = try!(ecdh::agree(r.secret(), public));
let mut key = [0u8; 32];
let mut mkey = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let mut hasher = Sha256::new();
let mkey_material = &key[16..32];
hasher.input(mkey_material);
hasher.result(&mut mkey);
let ekey = &key[0..16];
let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)];
msg[0] = 0x04u8;
{
let msgd = &mut msg[1..];
r.public().copy_to(&mut msgd[0..64]);
{
let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
aes::encrypt(ekey, &H128::new(), plain, cipher);
}
let mut hmac = Hmac::new(Sha256::new(), &mkey);
{
let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
hmac.input(cipher_iv);
}
hmac.input(shared_mac);
hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]);
}
Ok(msg)
}
/// Decrypt a message with a secret key
pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result<Bytes, CryptoError> {
use rcrypto::digest::Digest;
use rcrypto::sha2::Sha256;
use rcrypto::hmac::Hmac;
use rcrypto::mac::Mac;
let meta_len = 1 + 64 + 16 + 32;
if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
return Err(CryptoError::InvalidMessage); //invalid message: publickey
}
let e = &encrypted[1..];
let p = Public::from_slice(&e[0..64]);
let z = try!(ecdh::agree(secret, &p));
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let ekey = &key[0..16];
let mkey_material = &key[16..32];
let mut hasher = Sha256::new();
let mut mkey = [0u8; 32];
hasher.input(mkey_material);
hasher.result(&mut mkey);
let clen = encrypted.len() - meta_len;
let cipher_with_iv = &e[64..(64 + 16 + clen)];
let cipher_iv = &cipher_with_iv[0..16];
let cipher_no_iv = &cipher_with_iv[16..];
let msg_mac = &e[(64 + 16 + clen)..];
// Verify tag
let mut hmac = Hmac::new(Sha256::new(), &mkey);
hmac.input(cipher_with_iv);
hmac.input(shared_mac);
let mut mac = H256::new();
hmac.raw_result(&mut mac);
if &mac[..]!= msg_mac {
return Err(CryptoError::InvalidMessage);
}
let mut msg = vec![0u8; clen];
aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]);
Ok(msg)
}
fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
use rcrypto::digest::Digest;
use rcrypto::sha2::Sha256;
let mut hasher = Sha256::new();
// SEC/ISO/Shoup specify counter size SHOULD be equivalent
// to size of hash output, however, it also notes that
// the 4 bytes is okay. NIST specifies 4 bytes.
let mut ctr = 1u32;
let mut written = 0usize;
while written < dest.len() {
let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8];
hasher.input(&ctrs);
hasher.input(secret);
hasher.input(s1);
hasher.result(&mut dest[written..(written + 32)]);
hasher.reset();
written += 32;
ctr += 1;
}
}
}
/// AES encryption
pub mod aes {
use ::rcrypto::blockmodes::*;
use ::rcrypto::aessafe::*;
use ::rcrypto::symmetriccipher::*;
use ::rcrypto::buffer::*;
/// Encrypt a message
pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding");
}
/// Decrypt a message
pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding");
}
}
#[cfg(test)]
mod tests {
use hash::*;
use crypto::*;
// TODO: tests for sign/recover roundtrip, at least.
#[test]
fn test_signature() {
let pair = KeyPair::create().unwrap();
let message = H256::random();
let signature = ec::sign(pair.secret(), &message).unwrap();
assert!(ec::verify(pair.public(), &signature, &message).unwrap());
assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
}
#[test]
fn test_invalid_key() {
assert!(KeyPair::from_secret(h256_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")).is_err());
assert!(KeyPair::from_secret(h256_from_hex("0000000000000000000000000000000000000000000000000000000000000000")).is_err());
assert!(KeyPair::from_secret(h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141")).is_err());
}
#[test]
fn test_key() {
let pair = KeyPair::from_secret(h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2")).unwrap();
assert_eq!(pair.public().hex(), "101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c");
}
#[test]
fn ecies_shared() {
let kp = KeyPair::create().unwrap();
let message = b"So many books, so little time";
let shared = b"shared";
let wrong_shared = b"incorrect";
let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap();
assert!(encrypted[..]!= message[..]);
assert_eq!(encrypted[0], 0x04);
assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err());
let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap();
assert_eq!(decrypted[..message.len()], message[..]);
}
}
|
{
sig[64] <= 1 && H256::from_slice(&sig[0..32]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) >= h256_from_u64(1) && H256::from_slice(&sig[0..32]) >= h256_from_u64(1)
}
|
identifier_body
|
crypto.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore crypto.
use numbers::*;
use bytes::*;
use secp256k1::{Secp256k1, key};
use rand::os::OsRng;
use sha3::Hashable;
/// Secret key for secp256k1 EC operations. 256 bit generic "hash" data.
pub type Secret = H256;
/// Public key for secp256k1 EC operations. 512 bit generic "hash" data.
pub type Public = H512;
/// Signature for secp256k1 EC operations; encodes two 256-bit curve points
/// and a third sign bit. 520 bit generic "hash" data.
pub type Signature = H520;
lazy_static! {
static ref SECP256K1: Secp256k1 = Secp256k1::new();
}
impl Signature {
/// Create a new signature from the R, S and V componenets.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature {
use std::ptr;
let mut ret: Signature = Signature::new();
unsafe {
let retslice: &mut [u8] = &mut ret;
ptr::copy(r.as_ptr(), retslice.as_mut_ptr(), 32);
ptr::copy(s.as_ptr(), retslice.as_mut_ptr().offset(32), 32);
}
ret[64] = v;
ret
}
/// Convert transaction to R, S and V components.
pub fn to_rsv(&self) -> (U256, U256, u8) {
(U256::from(&self.as_slice()[0..32]), U256::from(&self.as_slice()[32..64]), self[64])
}
}
#[derive(Debug)]
/// Crypto error
pub enum CryptoError {
/// Invalid secret key
InvalidSecret,
/// Invalid public key
InvalidPublic,
/// Invalid EC signature
InvalidSignature,
/// Invalid AES message
InvalidMessage,
/// IO Error
Io(::std::io::Error),
}
impl From<::secp256k1::Error> for CryptoError {
fn from(e: ::secp256k1::Error) -> CryptoError {
match e {
::secp256k1::Error::InvalidMessage => CryptoError::InvalidMessage,
::secp256k1::Error::InvalidPublicKey => CryptoError::InvalidPublic,
::secp256k1::Error::InvalidSecretKey => CryptoError::InvalidSecret,
_ => CryptoError::InvalidSignature,
}
}
}
impl From<::std::io::Error> for CryptoError {
fn from(err: ::std::io::Error) -> CryptoError {
CryptoError::Io(err)
}
}
#[derive(Debug, PartialEq, Eq)]
/// secp256k1 Key pair
///
/// Use `create()` to create a new random key pair.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::crypto::*;
/// use ethcore_util::hash::*;
/// fn main() {
/// let pair = KeyPair::create().unwrap();
/// let message = H256::random();
/// let signature = ec::sign(pair.secret(), &message).unwrap();
///
/// assert!(ec::verify(pair.public(), &signature, &message).unwrap());
/// assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
/// }
/// ```
pub struct KeyPair {
secret: Secret,
public: Public,
}
impl KeyPair {
	/// Create a pair from secret key
	///
	/// Fails with `CryptoError::InvalidSecret` if the 32 bytes are not a
	/// valid secp256k1 secret key.
	pub fn from_secret(secret: Secret) -> Result<KeyPair, CryptoError> {
		let context = &SECP256K1;
		let s: key::SecretKey = try!(key::SecretKey::from_slice(context, &secret));
		let pub_key = try!(key::PublicKey::from_secret_key(context, &s));
		// Uncompressed serialisation is 65 bytes: 0x04 prefix + 64 key bytes.
		let serialized = pub_key.serialize_vec(context, false);
		let p: Public = Public::from_slice(&serialized[1..65]);
		Ok(KeyPair {
			secret: secret,
			public: p,
		})
	}
	/// Create a new random key pair
	pub fn create() -> Result<KeyPair, CryptoError> {
		let context = &SECP256K1;
		let mut rng = try!(OsRng::new());
		let (sec, publ) = try!(context.generate_keypair(&mut rng));
		let serialized = publ.serialize_vec(context, false);
		let p: Public = Public::from_slice(&serialized[1..65]);
		// NOTE(review): assumes key::SecretKey is layout-compatible with H256
		// (a plain 32-byte array) — confirm against the secp256k1 crate version.
		let s: Secret = unsafe { ::std::mem::transmute(sec) };
		Ok(KeyPair {
			secret: s,
			public: p,
		})
	}
	/// Returns public key
	pub fn public(&self) -> &Public {
		&self.public
	}
	/// Returns private key
	pub fn secret(&self) -> &Secret {
		&self.secret
	}
	/// Returns address derived from the SHA3 of the public key.
	pub fn address(&self) -> Address {
		Address::from(self.public.sha3())
	}
	/// Sign a message with our secret key.
	pub fn sign(&self, message: &H256) -> Result<Signature, CryptoError> {
		ec::sign(&self.secret, message)
	}
}
/// EC functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ec {
use numbers::*;
use standard::*;
use crypto::*;
use crypto;
/// Recovers Public key from signed message hash.
pub fn recover(signature: &Signature, message: &H256) -> Result<Public, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let publ = try!(context.recover(&try!(Message::from_slice(&message)), &rsig));
let serialized = publ.serialize_vec(context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
// TODO: check if it's the zero key and fail if so.
Ok(p)
}
/// Returns siganture of message hash.
pub fn sign(secret: &Secret, message: &H256) -> Result<Signature, CryptoError> {
// TODO: allow creation of only low-s signatures.
use secp256k1::*;
let context = &crypto::SECP256K1;
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let s = try!(context.sign_recoverable(&try!(Message::from_slice(&message)), sec));
let (rec_id, data) = s.serialize_compact(context);
let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() };
signature.clone_from_slice(&data);
signature[64] = rec_id.to_i32() as u8;
let (_, s, v) = signature.to_rsv();
let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap();
if!is_low_s(&s) {
signature = super::Signature::from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1);
}
Ok(signature)
}
/// Verify signature.
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let sig = rsig.to_standard(context);
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(context, &pdata));
match context.verify(&try!(Message::from_slice(&message)), &sig, &publ) {
Ok(_) => Ok(true),
Err(Error::IncorrectSignature) => Ok(false),
Err(x) => Err(CryptoError::from(x)),
}
}
/// Check if this is a "low" signature.
pub fn is_low(sig: &Signature) -> bool {
H256::from_slice(&sig[32..64]) <= h256_from_hex("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0")
}
/// Check if this is a "low" signature.
pub fn is_low_s(s: &U256) -> bool {
s <= &U256::from_str("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0").unwrap()
}
/// Check if each component of the signature is in range.
pub fn
|
(sig: &Signature) -> bool {
sig[64] <= 1 && H256::from_slice(&sig[0..32]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) >= h256_from_u64(1) && H256::from_slice(&sig[0..32]) >= h256_from_u64(1)
}
}
/// ECDH functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecdh {
	use crypto::*;
	use crypto;
	/// Agree on a shared secret
	///
	/// ECDH key agreement: combines our `secret` with the peer's `public`
	/// key to derive a 32-byte shared secret.
	pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, CryptoError> {
		use secp256k1::*;
		let context = &crypto::SECP256K1;
		// Rebuild the 65-byte uncompressed SEC1 encoding: 0x04 prefix + 64 bytes.
		let mut pdata: [u8; 65] = [4u8; 65];
		let ptr = pdata[1..].as_mut_ptr();
		let src = public.as_ptr();
		// SAFETY: `pdata[1..]` is 64 bytes, `Public` is H512 (64 bytes), and
		// the two regions do not overlap.
		unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
		let publ = try!(key::PublicKey::from_slice(context, &pdata));
		// NOTE(review): both transmutes assume layout compatibility between
		// the secp256k1 types and H256 — confirm against the crate version.
		let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
		let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
		let s: Secret = unsafe { ::std::mem::transmute(shared) };
		Ok(s)
	}
}
/// ECIES function
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecies {
	use hash::*;
	use bytes::*;
	use crypto::*;
	/// Encrypt a message with a public key
	///
	/// Output layout: `0x04 || ephemeral pubkey (64) || IV (16) || ciphertext || HMAC-SHA256 tag (32)`.
	/// `shared_mac` is extra data folded into the authentication tag only.
	pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result<Bytes, CryptoError> {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		use rcrypto::hmac::Hmac;
		use rcrypto::mac::Mac;
		// Ephemeral key pair; its public half is sent in the clear so the
		// receiver can run the same ECDH agreement.
		let r = try!(KeyPair::create());
		let z = try!(ecdh::agree(r.secret(), public));
		let mut key = [0u8; 32];
		let mut mkey = [0u8; 32];
		// Derive 32 bytes: first 16 = AES key, last 16 -> SHA256 -> MAC key.
		kdf(&z, &[0u8; 0], &mut key);
		let mut hasher = Sha256::new();
		let mkey_material = &key[16..32];
		hasher.input(mkey_material);
		hasher.result(&mut mkey);
		let ekey = &key[0..16];
		let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)];
		msg[0] = 0x04u8;
		{
			let msgd = &mut msg[1..];
			r.public().copy_to(&mut msgd[0..64]);
			{
				let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
				// NOTE(review): the IV is all zeroes (`H128::new()`) and the
				// 16 IV bytes of `msg` stay zero; this is only safe because a
				// fresh ephemeral key (hence AES key) is used per message.
				aes::encrypt(ekey, &H128::new(), plain, cipher);
			}
			// Tag covers IV || ciphertext plus the caller-supplied MAC data.
			let mut hmac = Hmac::new(Sha256::new(), &mkey);
			{
				let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
				hmac.input(cipher_iv);
			}
			hmac.input(shared_mac);
			hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]);
		}
		Ok(msg)
	}
	/// Decrypt a message with a secret key
	///
	/// Inverse of `encrypt`; fails with `CryptoError::InvalidMessage` on a
	/// short buffer, a bad prefix byte or a MAC mismatch.
	pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result<Bytes, CryptoError> {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		use rcrypto::hmac::Hmac;
		use rcrypto::mac::Mac;
		// prefix + ephemeral pubkey + IV + tag; anything shorter is malformed.
		let meta_len = 1 + 64 + 16 + 32;
		if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
			return Err(CryptoError::InvalidMessage); //invalid message: publickey
		}
		let e = &encrypted[1..];
		let p = Public::from_slice(&e[0..64]);
		let z = try!(ecdh::agree(secret, &p));
		let mut key = [0u8; 32];
		kdf(&z, &[0u8; 0], &mut key);
		let ekey = &key[0..16];
		let mkey_material = &key[16..32];
		let mut hasher = Sha256::new();
		let mut mkey = [0u8; 32];
		hasher.input(mkey_material);
		hasher.result(&mut mkey);
		let clen = encrypted.len() - meta_len;
		let cipher_with_iv = &e[64..(64 + 16 + clen)];
		let cipher_iv = &cipher_with_iv[0..16];
		let cipher_no_iv = &cipher_with_iv[16..];
		let msg_mac = &e[(64 + 16 + clen)..];
		// Verify tag
		// NOTE(review): slice `==` is not constant-time; a timing-safe
		// comparison would be preferable for MAC checks.
		let mut hmac = Hmac::new(Sha256::new(), &mkey);
		hmac.input(cipher_with_iv);
		hmac.input(shared_mac);
		let mut mac = H256::new();
		hmac.raw_result(&mut mac);
		if &mac[..]!= msg_mac {
			return Err(CryptoError::InvalidMessage);
		}
		let mut msg = vec![0u8; clen];
		aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]);
		Ok(msg)
	}
	// Concatenation KDF: SHA256(counter || secret || s1), counter from 1,
	// filling `dest` 32 bytes per round. Assumes dest.len() is a multiple of
	// 32 (the slice below panics otherwise); callers pass 32.
	fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		let mut hasher = Sha256::new();
		// SEC/ISO/Shoup specify counter size SHOULD be equivalent
		// to size of hash output, however, it also notes that
		// the 4 bytes is okay. NIST specifies 4 bytes.
		let mut ctr = 1u32;
		let mut written = 0usize;
		while written < dest.len() {
			// Big-endian counter bytes.
			let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8];
			hasher.input(&ctrs);
			hasher.input(secret);
			hasher.input(s1);
			hasher.result(&mut dest[written..(written + 32)]);
			hasher.reset();
			written += 32;
			ctr += 1;
		}
	}
}
/// AES encryption
pub mod aes {
	use ::rcrypto::blockmodes::*;
	use ::rcrypto::aessafe::*;
	use ::rcrypto::symmetriccipher::*;
	use ::rcrypto::buffer::*;
	/// Encrypt a message
	///
	/// AES-128 in CTR mode; ciphertext is written into `dest`.
	pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) {
		let mut mode = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
		let mut input = RefReadBuffer::new(plain);
		let mut output = RefWriteBuffer::new(dest);
		mode.encrypt(&mut input, &mut output, true).expect("Invalid length or padding");
	}
	/// Decrypt a message
	///
	/// CTR mode is symmetric, so decryption reuses the AES-128 encryptor core.
	pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) {
		let mut mode = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
		let mut input = RefReadBuffer::new(encrypted);
		let mut output = RefWriteBuffer::new(dest);
		mode.decrypt(&mut input, &mut output, true).expect("Invalid length or padding");
	}
}
#[cfg(test)]
mod tests {
	use hash::*;
	use crypto::*;
	// TODO: tests for sign/recover roundtrip, at least.
	#[test]
	fn test_signature() {
		// Sign, verify and recover over a random message; recovery must
		// yield the original public key.
		let pair = KeyPair::create().unwrap();
		let message = H256::random();
		let signature = ec::sign(pair.secret(), &message).unwrap();
		assert!(ec::verify(pair.public(), &signature, &message).unwrap());
		assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
	}
	#[test]
	fn test_invalid_key() {
		// All-ones, zero and the curve order n are rejected as secret keys.
		assert!(KeyPair::from_secret(h256_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")).is_err());
		assert!(KeyPair::from_secret(h256_from_hex("0000000000000000000000000000000000000000000000000000000000000000")).is_err());
		assert!(KeyPair::from_secret(h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141")).is_err());
	}
	#[test]
	fn test_key() {
		// Known-answer test: fixed secret key -> expected public key.
		let pair = KeyPair::from_secret(h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2")).unwrap();
		assert_eq!(pair.public().hex(), "101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c");
	}
	#[test]
	fn ecies_shared() {
		// Round-trip encrypt/decrypt; a wrong shared MAC must be rejected.
		let kp = KeyPair::create().unwrap();
		let message = b"So many books, so little time";
		let shared = b"shared";
		let wrong_shared = b"incorrect";
		let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap();
		assert!(encrypted[..]!= message[..]);
		assert_eq!(encrypted[0], 0x04);
		assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err());
		let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap();
		assert_eq!(decrypted[..message.len()], message[..]);
	}
}
|
is_valid
|
identifier_name
|
crypto.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore crypto.
use numbers::*;
use bytes::*;
use secp256k1::{Secp256k1, key};
use rand::os::OsRng;
use sha3::Hashable;
/// Secret key for secp256k1 EC operations. 256 bit generic "hash" data.
pub type Secret = H256;
/// Public key for secp256k1 EC operations. 512 bit generic "hash" data.
pub type Public = H512;
/// Signature for secp256k1 EC operations; encodes two 256-bit curve points
/// and a third sign bit. 520 bit generic "hash" data.
pub type Signature = H520;
lazy_static! {
	// Shared secp256k1 context, created once and reused by all crypto ops.
	static ref SECP256K1: Secp256k1 = Secp256k1::new();
}
impl Signature {
	/// Create a new signature from the R, S and V components.
	///
	/// Layout: bytes 0..32 = r, bytes 32..64 = s, byte 64 = v.
	pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature {
		let mut ret: Signature = Signature::new();
		{
			// Safe slice copies replace the previous `unsafe { ptr::copy(...) }`:
			// the hash types coerce to byte slices of known length, so the
			// copies are bounds-checked (lengths match: 32 bytes each).
			let retslice: &mut [u8] = &mut ret;
			retslice[0..32].copy_from_slice(&r[..]);
			retslice[32..64].copy_from_slice(&s[..]);
		}
		ret[64] = v;
		ret
	}
	/// Convert transaction to R, S and V components.
	pub fn to_rsv(&self) -> (U256, U256, u8) {
		(U256::from(&self.as_slice()[0..32]), U256::from(&self.as_slice()[32..64]), self[64])
	}
}
#[derive(Debug)]
/// Crypto error
|
/// Invalid EC signature
InvalidSignature,
/// Invalid AES message
InvalidMessage,
/// IO Error
Io(::std::io::Error),
}
// Map low-level secp256k1 errors onto crate-level `CryptoError` variants so
// they can be propagated with `try!`.
impl From<::secp256k1::Error> for CryptoError {
	fn from(e: ::secp256k1::Error) -> CryptoError {
		match e {
			::secp256k1::Error::InvalidMessage => CryptoError::InvalidMessage,
			::secp256k1::Error::InvalidPublicKey => CryptoError::InvalidPublic,
			::secp256k1::Error::InvalidSecretKey => CryptoError::InvalidSecret,
			// Any other secp256k1 failure is reported as an invalid signature.
			_ => CryptoError::InvalidSignature,
		}
	}
}
// Allow `try!` on I/O results (e.g. `OsRng::new()` in `KeyPair::create`).
impl From<::std::io::Error> for CryptoError {
	fn from(err: ::std::io::Error) -> CryptoError {
		CryptoError::Io(err)
	}
}
#[derive(Debug, PartialEq, Eq)]
/// secp256k1 Key pair
///
/// Use `create()` to create a new random key pair.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::crypto::*;
/// use ethcore_util::hash::*;
/// fn main() {
/// 	let pair = KeyPair::create().unwrap();
/// 	let message = H256::random();
/// 	let signature = ec::sign(pair.secret(), &message).unwrap();
///
/// 	assert!(ec::verify(pair.public(), &signature, &message).unwrap());
/// 	assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
/// }
/// ```
pub struct KeyPair {
	// 32-byte secret key (H256).
	secret: Secret,
	// 64-byte uncompressed public key without the 0x04 prefix (H512).
	public: Public,
}
impl KeyPair {
	/// Create a pair from secret key
	///
	/// Fails with `CryptoError::InvalidSecret` if the 32 bytes are not a
	/// valid secp256k1 secret key.
	pub fn from_secret(secret: Secret) -> Result<KeyPair, CryptoError> {
		let context = &SECP256K1;
		let s: key::SecretKey = try!(key::SecretKey::from_slice(context, &secret));
		let pub_key = try!(key::PublicKey::from_secret_key(context, &s));
		// Uncompressed serialisation is 65 bytes: 0x04 prefix + 64 key bytes.
		let serialized = pub_key.serialize_vec(context, false);
		let p: Public = Public::from_slice(&serialized[1..65]);
		Ok(KeyPair {
			secret: secret,
			public: p,
		})
	}
	/// Create a new random key pair
	pub fn create() -> Result<KeyPair, CryptoError> {
		let context = &SECP256K1;
		let mut rng = try!(OsRng::new());
		let (sec, publ) = try!(context.generate_keypair(&mut rng));
		let serialized = publ.serialize_vec(context, false);
		let p: Public = Public::from_slice(&serialized[1..65]);
		// NOTE(review): assumes key::SecretKey is layout-compatible with H256
		// (a plain 32-byte array) — confirm against the secp256k1 crate version.
		let s: Secret = unsafe { ::std::mem::transmute(sec) };
		Ok(KeyPair {
			secret: s,
			public: p,
		})
	}
	/// Returns public key
	pub fn public(&self) -> &Public {
		&self.public
	}
	/// Returns private key
	pub fn secret(&self) -> &Secret {
		&self.secret
	}
	/// Returns address derived from the SHA3 of the public key.
	pub fn address(&self) -> Address {
		Address::from(self.public.sha3())
	}
	/// Sign a message with our secret key.
	pub fn sign(&self, message: &H256) -> Result<Signature, CryptoError> {
		ec::sign(&self.secret, message)
	}
}
/// EC functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ec {
use numbers::*;
use standard::*;
use crypto::*;
use crypto;
/// Recovers Public key from signed message hash.
pub fn recover(signature: &Signature, message: &H256) -> Result<Public, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let publ = try!(context.recover(&try!(Message::from_slice(&message)), &rsig));
let serialized = publ.serialize_vec(context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
// TODO: check if it's the zero key and fail if so.
Ok(p)
}
/// Returns siganture of message hash.
pub fn sign(secret: &Secret, message: &H256) -> Result<Signature, CryptoError> {
// TODO: allow creation of only low-s signatures.
use secp256k1::*;
let context = &crypto::SECP256K1;
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let s = try!(context.sign_recoverable(&try!(Message::from_slice(&message)), sec));
let (rec_id, data) = s.serialize_compact(context);
let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() };
signature.clone_from_slice(&data);
signature[64] = rec_id.to_i32() as u8;
let (_, s, v) = signature.to_rsv();
let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap();
if!is_low_s(&s) {
signature = super::Signature::from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1);
}
Ok(signature)
}
/// Verify signature.
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
use secp256k1::*;
let context = &crypto::SECP256K1;
let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let sig = rsig.to_standard(context);
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(context, &pdata));
match context.verify(&try!(Message::from_slice(&message)), &sig, &publ) {
Ok(_) => Ok(true),
Err(Error::IncorrectSignature) => Ok(false),
Err(x) => Err(CryptoError::from(x)),
}
}
/// Check if this is a "low" signature.
pub fn is_low(sig: &Signature) -> bool {
H256::from_slice(&sig[32..64]) <= h256_from_hex("7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0")
}
/// Check if this is a "low" signature.
pub fn is_low_s(s: &U256) -> bool {
s <= &U256::from_str("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0").unwrap()
}
/// Check if each component of the signature is in range.
pub fn is_valid(sig: &Signature) -> bool {
sig[64] <= 1 && H256::from_slice(&sig[0..32]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") && H256::from_slice(&sig[32..64]) >= h256_from_u64(1) && H256::from_slice(&sig[0..32]) >= h256_from_u64(1)
}
}
/// ECDH functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecdh {
	use crypto::*;
	use crypto;
	/// Agree on a shared secret
	///
	/// ECDH key agreement: combines our `secret` with the peer's `public`
	/// key to derive a 32-byte shared secret.
	pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, CryptoError> {
		use secp256k1::*;
		let context = &crypto::SECP256K1;
		// Rebuild the 65-byte uncompressed SEC1 encoding: 0x04 prefix + 64 bytes.
		let mut pdata: [u8; 65] = [4u8; 65];
		let ptr = pdata[1..].as_mut_ptr();
		let src = public.as_ptr();
		// SAFETY: `pdata[1..]` is 64 bytes, `Public` is H512 (64 bytes), and
		// the two regions do not overlap.
		unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
		let publ = try!(key::PublicKey::from_slice(context, &pdata));
		// NOTE(review): both transmutes assume layout compatibility between
		// the secp256k1 types and H256 — confirm against the crate version.
		let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
		let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec);
		let s: Secret = unsafe { ::std::mem::transmute(shared) };
		Ok(s)
	}
}
/// ECIES function
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecies {
	use hash::*;
	use bytes::*;
	use crypto::*;
	/// Encrypt a message with a public key
	///
	/// Output layout: `0x04 || ephemeral pubkey (64) || IV (16) || ciphertext || HMAC-SHA256 tag (32)`.
	/// `shared_mac` is extra data folded into the authentication tag only.
	pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result<Bytes, CryptoError> {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		use rcrypto::hmac::Hmac;
		use rcrypto::mac::Mac;
		// Ephemeral key pair; its public half is sent in the clear so the
		// receiver can run the same ECDH agreement.
		let r = try!(KeyPair::create());
		let z = try!(ecdh::agree(r.secret(), public));
		let mut key = [0u8; 32];
		let mut mkey = [0u8; 32];
		// Derive 32 bytes: first 16 = AES key, last 16 -> SHA256 -> MAC key.
		kdf(&z, &[0u8; 0], &mut key);
		let mut hasher = Sha256::new();
		let mkey_material = &key[16..32];
		hasher.input(mkey_material);
		hasher.result(&mut mkey);
		let ekey = &key[0..16];
		let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)];
		msg[0] = 0x04u8;
		{
			let msgd = &mut msg[1..];
			r.public().copy_to(&mut msgd[0..64]);
			{
				let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
				// NOTE(review): the IV is all zeroes (`H128::new()`) and the
				// 16 IV bytes of `msg` stay zero; this is only safe because a
				// fresh ephemeral key (hence AES key) is used per message.
				aes::encrypt(ekey, &H128::new(), plain, cipher);
			}
			// Tag covers IV || ciphertext plus the caller-supplied MAC data.
			let mut hmac = Hmac::new(Sha256::new(), &mkey);
			{
				let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
				hmac.input(cipher_iv);
			}
			hmac.input(shared_mac);
			hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]);
		}
		Ok(msg)
	}
	/// Decrypt a message with a secret key
	///
	/// Inverse of `encrypt`; fails with `CryptoError::InvalidMessage` on a
	/// short buffer, a bad prefix byte or a MAC mismatch.
	pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result<Bytes, CryptoError> {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		use rcrypto::hmac::Hmac;
		use rcrypto::mac::Mac;
		// prefix + ephemeral pubkey + IV + tag; anything shorter is malformed.
		let meta_len = 1 + 64 + 16 + 32;
		if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
			return Err(CryptoError::InvalidMessage); //invalid message: publickey
		}
		let e = &encrypted[1..];
		let p = Public::from_slice(&e[0..64]);
		let z = try!(ecdh::agree(secret, &p));
		let mut key = [0u8; 32];
		kdf(&z, &[0u8; 0], &mut key);
		let ekey = &key[0..16];
		let mkey_material = &key[16..32];
		let mut hasher = Sha256::new();
		let mut mkey = [0u8; 32];
		hasher.input(mkey_material);
		hasher.result(&mut mkey);
		let clen = encrypted.len() - meta_len;
		let cipher_with_iv = &e[64..(64 + 16 + clen)];
		let cipher_iv = &cipher_with_iv[0..16];
		let cipher_no_iv = &cipher_with_iv[16..];
		let msg_mac = &e[(64 + 16 + clen)..];
		// Verify tag
		// NOTE(review): slice `==` is not constant-time; a timing-safe
		// comparison would be preferable for MAC checks.
		let mut hmac = Hmac::new(Sha256::new(), &mkey);
		hmac.input(cipher_with_iv);
		hmac.input(shared_mac);
		let mut mac = H256::new();
		hmac.raw_result(&mut mac);
		if &mac[..]!= msg_mac {
			return Err(CryptoError::InvalidMessage);
		}
		let mut msg = vec![0u8; clen];
		aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]);
		Ok(msg)
	}
	// Concatenation KDF: SHA256(counter || secret || s1), counter from 1,
	// filling `dest` 32 bytes per round. Assumes dest.len() is a multiple of
	// 32 (the slice below panics otherwise); callers pass 32.
	fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
		use rcrypto::digest::Digest;
		use rcrypto::sha2::Sha256;
		let mut hasher = Sha256::new();
		// SEC/ISO/Shoup specify counter size SHOULD be equivalent
		// to size of hash output, however, it also notes that
		// the 4 bytes is okay. NIST specifies 4 bytes.
		let mut ctr = 1u32;
		let mut written = 0usize;
		while written < dest.len() {
			// Big-endian counter bytes.
			let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8];
			hasher.input(&ctrs);
			hasher.input(secret);
			hasher.input(s1);
			hasher.result(&mut dest[written..(written + 32)]);
			hasher.reset();
			written += 32;
			ctr += 1;
		}
	}
}
/// AES encryption
pub mod aes {
	use ::rcrypto::blockmodes::*;
	use ::rcrypto::aessafe::*;
	use ::rcrypto::symmetriccipher::*;
	use ::rcrypto::buffer::*;
	/// Encrypt a message
	///
	/// AES-128 in CTR mode; ciphertext is written into `dest`.
	pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) {
		let mut mode = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
		let mut input = RefReadBuffer::new(plain);
		let mut output = RefWriteBuffer::new(dest);
		mode.encrypt(&mut input, &mut output, true).expect("Invalid length or padding");
	}
	/// Decrypt a message
	///
	/// CTR mode is symmetric, so decryption reuses the AES-128 encryptor core.
	pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) {
		let mut mode = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
		let mut input = RefReadBuffer::new(encrypted);
		let mut output = RefWriteBuffer::new(dest);
		mode.decrypt(&mut input, &mut output, true).expect("Invalid length or padding");
	}
}
#[cfg(test)]
mod tests {
	use hash::*;
	use crypto::*;
	// TODO: tests for sign/recover roundtrip, at least.
	#[test]
	fn test_signature() {
		// Sign, verify and recover over a random message; recovery must
		// yield the original public key.
		let pair = KeyPair::create().unwrap();
		let message = H256::random();
		let signature = ec::sign(pair.secret(), &message).unwrap();
		assert!(ec::verify(pair.public(), &signature, &message).unwrap());
		assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
	}
	#[test]
	fn test_invalid_key() {
		// All-ones, zero and the curve order n are rejected as secret keys.
		assert!(KeyPair::from_secret(h256_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")).is_err());
		assert!(KeyPair::from_secret(h256_from_hex("0000000000000000000000000000000000000000000000000000000000000000")).is_err());
		assert!(KeyPair::from_secret(h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141")).is_err());
	}
	#[test]
	fn test_key() {
		// Known-answer test: fixed secret key -> expected public key.
		let pair = KeyPair::from_secret(h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2")).unwrap();
		assert_eq!(pair.public().hex(), "101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c");
	}
	#[test]
	fn ecies_shared() {
		// Round-trip encrypt/decrypt; a wrong shared MAC must be rejected.
		let kp = KeyPair::create().unwrap();
		let message = b"So many books, so little time";
		let shared = b"shared";
		let wrong_shared = b"incorrect";
		let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap();
		assert!(encrypted[..]!= message[..]);
		assert_eq!(encrypted[0], 0x04);
		assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err());
		let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap();
		assert_eq!(decrypted[..message.len()], message[..]);
	}
}
|
pub enum CryptoError {
/// Invalid secret key
InvalidSecret,
/// Invalid public key
InvalidPublic,
|
random_line_split
|
country.rs
|
use std::collections::BTreeMap;
use std::f64;
use super::country_page_data::CountryPageData;
use super::event::Event;
use super::event_trunc::{EventTrunc, EventTruncRenderable};
use super::year::Year;
use super::year_page_data::YearPageData;
/// `Country` contains full country data
#[derive(Debug, Clone, RustcEncodable)]
pub struct Country {
pub events: Vec<Event>,
|
pub num_events: i32,
pub num_fatalities: i32,
}
impl Country {
    /// Construct a `Country` with an empty event list and the given link,
    /// name and aggregate counters.
    pub fn new(t_link: String, t_name: String, num_eve: i32, num_fat: i32) -> Country {
        Country {
            events: Vec::new(),
            link: t_link,
            name: t_name,
            num_events: num_eve,
            num_fatalities: num_fat,
        }
    }
    /// Create a blank `Country` from just the name
    ///
    /// The link slug is the name with spaces removed; counters start at zero.
    pub fn from_name(t_name: String) -> Country {
        Country {
            events: Vec::new(),
            link: t_name.clone().replace(" ", ""),
            name: t_name,
            num_events: 0i32,
            num_fatalities: 0i32,
        }
    }
    /// Aggregate this country's events per year and render them into a
    /// `CountryPageData` (years listed newest first).
    pub fn to_page_data(&self) -> CountryPageData {
        // Group events by year; BTreeMap keeps years sorted ascending.
        let mut t_years: BTreeMap<i32, Year> = BTreeMap::new();
        for event in &self.events {
            // `or_insert_with` avoids constructing a Year when the entry exists.
            let c_year = t_years.entry(event.year).or_insert_with(|| Year::new(event.year));
            c_year.events += 1;
            c_year.fatalities += event.fatalities;
        }
        // Collect directly instead of the previous push loop + extra clone.
        let mut year_vec: Vec<Year> = t_years.values().cloned().collect();
        for elem in &mut year_vec {
            let f_eve: f64 = f64::from(elem.events);
            let f_fat: f64 = f64::from(elem.fatalities);
            // Events per day (NOTE(review): assumes a 365-day year) and
            // fatalities per event, formatted to two decimals.
            let t_epd: f64 = f_eve / 365.0f64;
            let t_fpe: f64 = f_fat / f_eve;
            elem.epd = format!("{:.2}", t_epd);
            elem.fpe = format!("{:.2}", t_fpe);
        }
        // Most recent year first.
        year_vec.reverse();
        CountryPageData {
            found: true,
            name: self.name.clone(),
            link: self.link.clone(),
            // NOTE(review): this counts years, not events — the field name
            // `total_eve` may be misleading; confirm with consumers.
            total_eve: year_vec.len() as i32,
            years: year_vec,
        }
    }
    /// Collect renderable events for a single year into a `YearPageData`.
    pub fn to_year_data(&self, inp_year: i32) -> YearPageData {
        let t_eve: Vec<EventTruncRenderable> = self.events
            .iter()
            .filter(|elem| elem.year == inp_year)
            .map(|elem| EventTruncRenderable::from_event(elem))
            .collect();
        YearPageData {
            eve_vec: t_eve,
            name: self.name.clone(),
            year: inp_year,
        }
    }
}
|
pub link: String,
pub name: String,
|
random_line_split
|
country.rs
|
use std::collections::BTreeMap;
use std::f64;
use super::country_page_data::CountryPageData;
use super::event::Event;
use super::event_trunc::{EventTrunc, EventTruncRenderable};
use super::year::Year;
use super::year_page_data::YearPageData;
/// `Country` contains full country data
#[derive(Debug, Clone, RustcEncodable)]
pub struct
|
{
pub events: Vec<Event>,
pub link: String,
pub name: String,
pub num_events: i32,
pub num_fatalities: i32,
}
impl Country {
    /// Construct a `Country` with an empty event list and the given link,
    /// name and aggregate counters.
    pub fn new(t_link: String, t_name: String, num_eve: i32, num_fat: i32) -> Country {
        Country {
            events: Vec::new(),
            link: t_link,
            name: t_name,
            num_events: num_eve,
            num_fatalities: num_fat,
        }
    }
    /// Create a blank `Country` from just the name
    pub fn from_name(t_name: String) -> Country {
        Country {
            events: Vec::new(),
            // Link slug is the name with spaces removed.
            link: t_name.clone().replace(" ", ""),
            name: t_name,
            num_events: 0i32,
            num_fatalities: 0i32,
        }
    }
    /// Aggregate events per year into a `CountryPageData` (newest year first).
    pub fn to_page_data(&self) -> CountryPageData {
        // Group events by year; BTreeMap keeps years sorted ascending.
        let mut t_years: BTreeMap<i32, Year> = BTreeMap::new();
        for event in &self.events {
            let c_year = t_years.entry(event.year).or_insert(Year::new(event.year));
            c_year.events += 1;
            c_year.fatalities += event.fatalities;
        }
        let mut year_vec: Vec<Year> = Vec::new();
        for elem in t_years.values() {
            year_vec.push(elem.clone());
        }
        for elem in &mut year_vec {
            let f_eve: f64 = f64::from(elem.events);
            let f_fat: f64 = f64::from(elem.fatalities);
            // Events per day (assumes a 365-day year) and fatalities per event.
            let t_epd: f64 = f_eve / 365.0f64;
            let t_fpe: f64 = f_fat / f_eve;
            elem.epd = format!("{:.2}", t_epd);
            elem.fpe = format!("{:.2}", t_fpe);
        }
        // Reverse so the most recent year comes first.
        let mut t_vec = year_vec.clone();
        t_vec.reverse();
        CountryPageData {
            found: true,
            name: self.name.clone(),
            link: self.link.clone(),
            // NOTE(review): this counts years, not events — the field name
            // `total_eve` may be misleading; confirm with consumers.
            total_eve: t_vec.len() as i32,
            years: t_vec,
        }
    }
    /// Collect renderable events for a single year into a `YearPageData`.
    pub fn to_year_data(&self, inp_year: i32) -> YearPageData {
        let mut t_eve: Vec<EventTruncRenderable> = Vec::new();
        for elem in &self.events {
            if elem.year == inp_year {
                t_eve.push(EventTruncRenderable::from_event(elem));
            }
        }
        YearPageData {
            eve_vec: t_eve,
            name: self.name.clone(),
            year: inp_year,
        }
    }
}
|
Country
|
identifier_name
|
country.rs
|
use std::collections::BTreeMap;
use std::f64;
use super::country_page_data::CountryPageData;
use super::event::Event;
use super::event_trunc::{EventTrunc, EventTruncRenderable};
use super::year::Year;
use super::year_page_data::YearPageData;
/// `Country` contains full country data
#[derive(Debug, Clone, RustcEncodable)]
pub struct Country {
    // Full event records for this country.
    pub events: Vec<Event>,
    // URL slug: the name with spaces removed (see `from_name`).
    pub link: String,
    pub name: String,
    // Aggregate totals over all events.
    pub num_events: i32,
    pub num_fatalities: i32,
}
impl Country {
pub fn new(t_link: String, t_name: String, num_eve: i32, num_fat: i32) -> Country {
Country {
events: Vec::new(),
link: t_link,
name: t_name,
num_events: num_eve,
num_fatalities: num_fat,
}
}
/// Create a blank `Country` from just the name
pub fn from_name(t_name: String) -> Country {
Country {
events: Vec::new(),
link: t_name.clone().replace(" ", ""),
name: t_name,
num_events: 0i32,
num_fatalities: 0i32,
}
}
pub fn to_page_data(&self) -> CountryPageData {
let mut t_years: BTreeMap<i32, Year> = BTreeMap::new();
for event in &self.events {
let c_year = t_years.entry(event.year).or_insert(Year::new(event.year));
c_year.events += 1;
c_year.fatalities += event.fatalities;
}
let mut year_vec: Vec<Year> = Vec::new();
for elem in t_years.values() {
year_vec.push(elem.clone());
}
for elem in &mut year_vec {
let f_eve: f64 = f64::from(elem.events);
let f_fat: f64 = f64::from(elem.fatalities);
let t_epd: f64 = f_eve / 365.0f64;
let t_fpe: f64 = f_fat / f_eve;
elem.epd = format!("{:.2}", t_epd);
elem.fpe = format!("{:.2}", t_fpe);
}
let mut t_vec = year_vec.clone();
t_vec.reverse();
CountryPageData {
found: true,
name: self.name.clone(),
link: self.link.clone(),
total_eve: t_vec.len() as i32,
years: t_vec,
}
}
pub fn to_year_data(&self, inp_year: i32) -> YearPageData {
let mut t_eve: Vec<EventTruncRenderable> = Vec::new();
for elem in &self.events {
if elem.year == inp_year
|
}
YearPageData {
eve_vec: t_eve,
name: self.name.clone(),
year: inp_year,
}
}
}
|
{
t_eve.push(EventTruncRenderable::from_event(elem));
}
|
conditional_block
|
country.rs
|
use std::collections::BTreeMap;
use std::f64;
use super::country_page_data::CountryPageData;
use super::event::Event;
use super::event_trunc::{EventTrunc, EventTruncRenderable};
use super::year::Year;
use super::year_page_data::YearPageData;
/// `Country` contains full country data
#[derive(Debug, Clone, RustcEncodable)]
pub struct Country {
    // Full event records for this country.
    pub events: Vec<Event>,
    // URL slug: the name with spaces removed (see `from_name`).
    pub link: String,
    pub name: String,
    // Aggregate totals over all events.
    pub num_events: i32,
    pub num_fatalities: i32,
}
impl Country {
pub fn new(t_link: String, t_name: String, num_eve: i32, num_fat: i32) -> Country {
Country {
events: Vec::new(),
link: t_link,
name: t_name,
num_events: num_eve,
num_fatalities: num_fat,
}
}
/// Create a blank `Country` from just the name
pub fn from_name(t_name: String) -> Country {
Country {
events: Vec::new(),
link: t_name.clone().replace(" ", ""),
name: t_name,
num_events: 0i32,
num_fatalities: 0i32,
}
}
pub fn to_page_data(&self) -> CountryPageData {
let mut t_years: BTreeMap<i32, Year> = BTreeMap::new();
for event in &self.events {
let c_year = t_years.entry(event.year).or_insert(Year::new(event.year));
c_year.events += 1;
c_year.fatalities += event.fatalities;
}
let mut year_vec: Vec<Year> = Vec::new();
for elem in t_years.values() {
year_vec.push(elem.clone());
}
for elem in &mut year_vec {
let f_eve: f64 = f64::from(elem.events);
let f_fat: f64 = f64::from(elem.fatalities);
let t_epd: f64 = f_eve / 365.0f64;
let t_fpe: f64 = f_fat / f_eve;
elem.epd = format!("{:.2}", t_epd);
elem.fpe = format!("{:.2}", t_fpe);
}
let mut t_vec = year_vec.clone();
t_vec.reverse();
CountryPageData {
found: true,
name: self.name.clone(),
link: self.link.clone(),
total_eve: t_vec.len() as i32,
years: t_vec,
}
}
pub fn to_year_data(&self, inp_year: i32) -> YearPageData
|
}
|
{
let mut t_eve: Vec<EventTruncRenderable> = Vec::new();
for elem in &self.events {
if elem.year == inp_year {
t_eve.push(EventTruncRenderable::from_event(elem));
}
}
YearPageData {
eve_vec: t_eve,
name: self.name.clone(),
year: inp_year,
}
}
|
identifier_body
|
lights.rs
|
use cgmath::Point2;
use layer::LayerId;
#[derive(Clone, Debug)]
pub struct AmbientLight {
pub color: LightColor,
pub intensity: f32,
}
#[derive(Clone, Debug)]
pub struct Light {
pub center: Point2<f32>,
pub radius: f32,
pub source_radius: f32,
pub source_layer: LayerId,
pub color: LightColor,
pub intensity: f32,
}
#[derive(Clone, Copy, Debug)]
pub struct LightColor {
pub r: f32,
pub g: f32,
pub b: f32,
}
impl LightColor {
pub fn
|
([r, g, b]: [f32; 3]) -> Self {
use color::component_srgb_to_linear as conv;
LightColor {
r: conv(r),
g: conv(g),
b: conv(b),
}
}
pub fn from_array([r, g, b]: [f32; 3]) -> Self {
LightColor { r: r, g: g, b: b }
}
pub fn to_array(&self) -> [f32; 3] {
[self.r, self.g, self.b]
}
}
|
from_srgb
|
identifier_name
|
lights.rs
|
use cgmath::Point2;
use layer::LayerId;
#[derive(Clone, Debug)]
pub struct AmbientLight {
pub color: LightColor,
pub intensity: f32,
}
#[derive(Clone, Debug)]
pub struct Light {
pub center: Point2<f32>,
pub radius: f32,
pub source_radius: f32,
pub source_layer: LayerId,
pub color: LightColor,
|
#[derive(Clone, Copy, Debug)]
pub struct LightColor {
pub r: f32,
pub g: f32,
pub b: f32,
}
impl LightColor {
pub fn from_srgb([r, g, b]: [f32; 3]) -> Self {
use color::component_srgb_to_linear as conv;
LightColor {
r: conv(r),
g: conv(g),
b: conv(b),
}
}
pub fn from_array([r, g, b]: [f32; 3]) -> Self {
LightColor { r: r, g: g, b: b }
}
pub fn to_array(&self) -> [f32; 3] {
[self.r, self.g, self.b]
}
}
|
pub intensity: f32,
}
|
random_line_split
|
tile.rs
|
use std::f64::{self, consts};
const EARTH_RADIUS: f64 = 6378137.0;
const PERIMETER: f64 = EARTH_RADIUS * 2. * consts::PI;
#[allow(dead_code)]
/**
* Turns WGS 84 coordinates into WebMercator
* tiles numbering
*/
pub fn lon_lat_to_tile(lon: f64, lat: f64, zoom: u32) -> (u32, u32, u32) {
let lat = lat.to_radians();
let n = 2_f64.powf(zoom as f64);
let xtile: f64 = ((lon + 180.0) / 360.0) * n;
let ytile: f64 = (1.0 - (lat.tan() + (1.0 / lat.cos())).ln() / consts::PI) / 2.0 * n;
(xtile.floor() as u32, ytile.floor() as u32, zoom)
}
/**
* Turns WebMercator coordinates into WGS 84 coordinates
*/
pub fn tile_to_wgs84(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let lon_deg = x / n * 360.0 - 180.0;
let lat_deg = (consts::PI * (1.0 - 2.0 * y / n)).sinh().atan().to_degrees();
(lon_deg, lat_deg)
}
/**
* Turns tiles coordinates into WebMercator (EPSG:3857)
*/
pub fn tile_to_3857(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let resolution = PERIMETER / n;
let x_meter = (x * resolution) - PERIMETER / 2.;
let y_meter = -(y * resolution) + PERIMETER / 2.;
(x_meter, y_meter)
}
/**
* Turns WGS84 longitude into meters (Spherical mercator)
*/
pub fn lon_wgs84_to_meters(lon: f64) -> f64 {
EARTH_RADIUS * lon.to_radians()
}
/**
* Turns WGS84 latitude into meters (Spherical mercator)
*/
pub fn lat_wgs84_to_meters(lat: f64) -> f64 {
//EARTH_RADIUS * (consts::PI / 4. + lat.to_radians() / 2.).tan().ln()
EARTH_RADIUS * lat.to_radians().sin().atanh()
}
/**
* Turns WGS84 coordinates into meters (Spherical mercator)
*/
pub fn wgs84_to_meters(lon: f64, lat: f64) -> (f64, f64){
(lon_wgs84_to_meters(lon), lat_wgs84_to_meters(lat))
}
#[derive(Debug, PartialEq)]
pub struct LonLatBbox {
/// degrees
pub west: f64,
/// degrees
pub south: f64,
/// degrees
pub east: f64,
/// degrees
pub north: f64,
}
impl LonLatBbox {
pub fn xy(&self) -> Bbox {
let (west, north) = wgs84_to_meters(self.west, self.north);
let (east, south) = wgs84_to_meters(self.east, self.south);
Bbox {west, south, east, north}
}
}
#[derive(Debug, PartialEq)]
pub struct Bbox {
/// meters
pub west: f64,
/// meters
pub south: f64,
/// meters
pub east: f64,
/// meters
pub north: f64,
}
/// This struct holds basic informations about a Tile.
#[derive(Debug, PartialEq)]
pub struct Tile {
/// `x` coordinate of a tile
pub x: u32,
/// `y` coordinate of a tile
pub y: u32,
/// zoom level, (from 0 to 19 included)
pub z: u32,
}
impl Tile {
/**
* Returns the bounding box of self,
* expressed in WGS 84
* */
pub fn bounds(&self) -> LonLatBbox {
let (west, north) = tile_to_wgs84(self.x, self.y, self.z);
let (east, south) = tile_to_wgs84(self.x + 1, self.y + 1, self.z);
LonLatBbox {west, south, east, north}
}
/**
* Returns Bounding box in Spheriacl mercator coordinates (meters)
*/
pub fn xy_bounds(&self) -> Bbox {
//self.bounds().xy()
let (west, north) = tile_to_3857(self.x, self.y, self.z);
let (east, south) = tile_to_3857(self.x + 1, self.y + 1, self.z);
Bbox {west, south, east, north}
}
}
#[test]
fn test_tile_to_wgs84() {
assert_eq!((0.0, 66.51326044311186), tile_to_wgs84(2, 1, 2));
assert_eq!((270.0, -85.0511287798066), tile_to_wgs84(5, 4, 2));
assert_eq!((-9.140625, 53.33087298301705), tile_to_wgs84(486, 332, 10));
}
#[test]
fn test_lon_lat_to_tile() {
assert_eq!((16, 14, 5), lon_lat_to_tile(10.0, 20.0, 5));
assert_eq!((15, 14, 5), lon_lat_to_tile(-10.0, 20.0, 5));
assert_eq!((23, 7, 5), lon_lat_to_tile(80.0, 70.0, 5));
assert_eq!((1, 0, 1), lon_lat_to_tile(80.0, 70.0, 1));
}
#[test]
fn test_bounds()
|
{
let bbox = LonLatBbox {
west: 78.75,
south: 66.51326044311186,
east: 90.0,
north: 70.61261423801925
};
let tile = Tile {
x: 23,
y: 7,
z: 5,
};
assert_eq!(bbox, tile.bounds());
}
|
identifier_body
|
|
tile.rs
|
use std::f64::{self, consts};
const EARTH_RADIUS: f64 = 6378137.0;
const PERIMETER: f64 = EARTH_RADIUS * 2. * consts::PI;
|
* tiles numbering
*/
pub fn lon_lat_to_tile(lon: f64, lat: f64, zoom: u32) -> (u32, u32, u32) {
let lat = lat.to_radians();
let n = 2_f64.powf(zoom as f64);
let xtile: f64 = ((lon + 180.0) / 360.0) * n;
let ytile: f64 = (1.0 - (lat.tan() + (1.0 / lat.cos())).ln() / consts::PI) / 2.0 * n;
(xtile.floor() as u32, ytile.floor() as u32, zoom)
}
/**
* Turns WebMercator coordinates into WGS 84 coordinates
*/
pub fn tile_to_wgs84(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let lon_deg = x / n * 360.0 - 180.0;
let lat_deg = (consts::PI * (1.0 - 2.0 * y / n)).sinh().atan().to_degrees();
(lon_deg, lat_deg)
}
/**
* Turns tiles coordinates into WebMercator (EPSG:3857)
*/
pub fn tile_to_3857(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let resolution = PERIMETER / n;
let x_meter = (x * resolution) - PERIMETER / 2.;
let y_meter = -(y * resolution) + PERIMETER / 2.;
(x_meter, y_meter)
}
/**
* Turns WGS84 longitude into meters (Spherical mercator)
*/
pub fn lon_wgs84_to_meters(lon: f64) -> f64 {
EARTH_RADIUS * lon.to_radians()
}
/**
* Turns WGS84 latitude into meters (Spherical mercator)
*/
pub fn lat_wgs84_to_meters(lat: f64) -> f64 {
//EARTH_RADIUS * (consts::PI / 4. + lat.to_radians() / 2.).tan().ln()
EARTH_RADIUS * lat.to_radians().sin().atanh()
}
/**
* Turns WGS84 coordinates into meters (Spherical mercator)
*/
pub fn wgs84_to_meters(lon: f64, lat: f64) -> (f64, f64){
(lon_wgs84_to_meters(lon), lat_wgs84_to_meters(lat))
}
#[derive(Debug, PartialEq)]
pub struct LonLatBbox {
/// degrees
pub west: f64,
/// degrees
pub south: f64,
/// degrees
pub east: f64,
/// degrees
pub north: f64,
}
impl LonLatBbox {
pub fn xy(&self) -> Bbox {
let (west, north) = wgs84_to_meters(self.west, self.north);
let (east, south) = wgs84_to_meters(self.east, self.south);
Bbox {west, south, east, north}
}
}
#[derive(Debug, PartialEq)]
pub struct Bbox {
/// meters
pub west: f64,
/// meters
pub south: f64,
/// meters
pub east: f64,
/// meters
pub north: f64,
}
/// This struct holds basic informations about a Tile.
#[derive(Debug, PartialEq)]
pub struct Tile {
/// `x` coordinate of a tile
pub x: u32,
/// `y` coordinate of a tile
pub y: u32,
/// zoom level, (from 0 to 19 included)
pub z: u32,
}
impl Tile {
/**
* Returns the bounding box of self,
* expressed in WGS 84
* */
pub fn bounds(&self) -> LonLatBbox {
let (west, north) = tile_to_wgs84(self.x, self.y, self.z);
let (east, south) = tile_to_wgs84(self.x + 1, self.y + 1, self.z);
LonLatBbox {west, south, east, north}
}
/**
* Returns Bounding box in Spheriacl mercator coordinates (meters)
*/
pub fn xy_bounds(&self) -> Bbox {
//self.bounds().xy()
let (west, north) = tile_to_3857(self.x, self.y, self.z);
let (east, south) = tile_to_3857(self.x + 1, self.y + 1, self.z);
Bbox {west, south, east, north}
}
}
#[test]
fn test_tile_to_wgs84() {
assert_eq!((0.0, 66.51326044311186), tile_to_wgs84(2, 1, 2));
assert_eq!((270.0, -85.0511287798066), tile_to_wgs84(5, 4, 2));
assert_eq!((-9.140625, 53.33087298301705), tile_to_wgs84(486, 332, 10));
}
#[test]
fn test_lon_lat_to_tile() {
assert_eq!((16, 14, 5), lon_lat_to_tile(10.0, 20.0, 5));
assert_eq!((15, 14, 5), lon_lat_to_tile(-10.0, 20.0, 5));
assert_eq!((23, 7, 5), lon_lat_to_tile(80.0, 70.0, 5));
assert_eq!((1, 0, 1), lon_lat_to_tile(80.0, 70.0, 1));
}
#[test]
fn test_bounds() {
let bbox = LonLatBbox {
west: 78.75,
south: 66.51326044311186,
east: 90.0,
north: 70.61261423801925
};
let tile = Tile {
x: 23,
y: 7,
z: 5,
};
assert_eq!(bbox, tile.bounds());
}
|
#[allow(dead_code)]
/**
* Turns WGS 84 coordinates into WebMercator
|
random_line_split
|
tile.rs
|
use std::f64::{self, consts};
const EARTH_RADIUS: f64 = 6378137.0;
const PERIMETER: f64 = EARTH_RADIUS * 2. * consts::PI;
#[allow(dead_code)]
/**
* Turns WGS 84 coordinates into WebMercator
* tiles numbering
*/
pub fn lon_lat_to_tile(lon: f64, lat: f64, zoom: u32) -> (u32, u32, u32) {
let lat = lat.to_radians();
let n = 2_f64.powf(zoom as f64);
let xtile: f64 = ((lon + 180.0) / 360.0) * n;
let ytile: f64 = (1.0 - (lat.tan() + (1.0 / lat.cos())).ln() / consts::PI) / 2.0 * n;
(xtile.floor() as u32, ytile.floor() as u32, zoom)
}
/**
* Turns WebMercator coordinates into WGS 84 coordinates
*/
pub fn tile_to_wgs84(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let lon_deg = x / n * 360.0 - 180.0;
let lat_deg = (consts::PI * (1.0 - 2.0 * y / n)).sinh().atan().to_degrees();
(lon_deg, lat_deg)
}
/**
* Turns tiles coordinates into WebMercator (EPSG:3857)
*/
pub fn tile_to_3857(x: u32, y: u32, z: u32) -> (f64, f64){
let (x, y) = (x as f64, y as f64);
let n = 2_f64.powf(z as f64);
let resolution = PERIMETER / n;
let x_meter = (x * resolution) - PERIMETER / 2.;
let y_meter = -(y * resolution) + PERIMETER / 2.;
(x_meter, y_meter)
}
/**
* Turns WGS84 longitude into meters (Spherical mercator)
*/
pub fn lon_wgs84_to_meters(lon: f64) -> f64 {
EARTH_RADIUS * lon.to_radians()
}
/**
* Turns WGS84 latitude into meters (Spherical mercator)
*/
pub fn lat_wgs84_to_meters(lat: f64) -> f64 {
//EARTH_RADIUS * (consts::PI / 4. + lat.to_radians() / 2.).tan().ln()
EARTH_RADIUS * lat.to_radians().sin().atanh()
}
/**
* Turns WGS84 coordinates into meters (Spherical mercator)
*/
pub fn wgs84_to_meters(lon: f64, lat: f64) -> (f64, f64){
(lon_wgs84_to_meters(lon), lat_wgs84_to_meters(lat))
}
#[derive(Debug, PartialEq)]
pub struct LonLatBbox {
/// degrees
pub west: f64,
/// degrees
pub south: f64,
/// degrees
pub east: f64,
/// degrees
pub north: f64,
}
impl LonLatBbox {
pub fn
|
(&self) -> Bbox {
let (west, north) = wgs84_to_meters(self.west, self.north);
let (east, south) = wgs84_to_meters(self.east, self.south);
Bbox {west, south, east, north}
}
}
#[derive(Debug, PartialEq)]
pub struct Bbox {
/// meters
pub west: f64,
/// meters
pub south: f64,
/// meters
pub east: f64,
/// meters
pub north: f64,
}
/// This struct holds basic informations about a Tile.
#[derive(Debug, PartialEq)]
pub struct Tile {
/// `x` coordinate of a tile
pub x: u32,
/// `y` coordinate of a tile
pub y: u32,
/// zoom level, (from 0 to 19 included)
pub z: u32,
}
impl Tile {
/**
* Returns the bounding box of self,
* expressed in WGS 84
* */
pub fn bounds(&self) -> LonLatBbox {
let (west, north) = tile_to_wgs84(self.x, self.y, self.z);
let (east, south) = tile_to_wgs84(self.x + 1, self.y + 1, self.z);
LonLatBbox {west, south, east, north}
}
/**
* Returns Bounding box in Spheriacl mercator coordinates (meters)
*/
pub fn xy_bounds(&self) -> Bbox {
//self.bounds().xy()
let (west, north) = tile_to_3857(self.x, self.y, self.z);
let (east, south) = tile_to_3857(self.x + 1, self.y + 1, self.z);
Bbox {west, south, east, north}
}
}
#[test]
fn test_tile_to_wgs84() {
assert_eq!((0.0, 66.51326044311186), tile_to_wgs84(2, 1, 2));
assert_eq!((270.0, -85.0511287798066), tile_to_wgs84(5, 4, 2));
assert_eq!((-9.140625, 53.33087298301705), tile_to_wgs84(486, 332, 10));
}
#[test]
fn test_lon_lat_to_tile() {
assert_eq!((16, 14, 5), lon_lat_to_tile(10.0, 20.0, 5));
assert_eq!((15, 14, 5), lon_lat_to_tile(-10.0, 20.0, 5));
assert_eq!((23, 7, 5), lon_lat_to_tile(80.0, 70.0, 5));
assert_eq!((1, 0, 1), lon_lat_to_tile(80.0, 70.0, 1));
}
#[test]
fn test_bounds() {
let bbox = LonLatBbox {
west: 78.75,
south: 66.51326044311186,
east: 90.0,
north: 70.61261423801925
};
let tile = Tile {
x: 23,
y: 7,
z: 5,
};
assert_eq!(bbox, tile.bounds());
}
|
xy
|
identifier_name
|
rect.rs
|
use std::convert::From;
use graphics::math::{ self, Scalar };
use { Point, Size };
/// A rectangle.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
/// The position of the top left corner of the rectangle.
pub pos: Point,
/// The width and height of the rectangle.
pub size: Size,
}
impl<P: Into<Point>, S: Into<Size>> From<(P, S)> for Rect {
/// Creates a rectangle from the position of its top left corner and its size.
fn from((pos, size): (P, S)) -> Rect {
let (pos, size): (Point, Size) = (pos.into(), size.into());
Rect { pos: pos, size: size }
}
}
impl From<Rect> for [Scalar; 4] {
fn from(rect: Rect) -> [Scalar; 4] {
[rect.pos.x, rect.pos.y, rect.size.w, rect.size.h]
}
}
impl From<[Scalar; 4]> for Rect {
/// Creates a rectangle from an array.
fn from(v: [Scalar; 4]) -> Rect {
Rect {
pos: Point { x: v[0], y: v[1] },
size: Size { w: v[2], h: v[3] },
}
}
}
impl From<(Scalar, Scalar, Scalar, Scalar)> for Rect {
fn from((x, y, w, h): (Scalar, Scalar, Scalar, Scalar)) -> Rect {
Rect {
pos: Point { x: x, y: y },
size: Size { w: w, h: h },
}
}
}
impl Rect {
/// Returns the position of the bottom side of the rectangle.
pub fn bottom(&self) -> Scalar {
self.pos.y + self.size.h
}
/// Computes a rectangle with quadruple the surface area of self and with center
/// (self.x, self.y).
pub fn centered(self) -> Rect {
Rect {
pos: Point {
x: self.pos.x - self.size.w,
y: self.pos.y - self.size.h,
},
size: self.size * 2.0,
}
}
/// Compute whether or not the point is inside the rectangle.
#[inline(always)]
pub fn contains<T: Into<Point>>(&self, point: T) -> bool {
let point: Point = point.into();
self.left() < point.x && point.x < self.right() &&
self.top() < point.y && point.y < self.bottom()
}
/// Create a rectangle that circumscribes the given circle.
pub fn new_circle<T: Into<Point>>(center: T, radius: Scalar) -> Rect {
let center: Point = center.into();
Rect {
pos: Point {
x: center.x - radius,
y: center.y - radius,
},
size: Size {
w: 2.0 * radius,
h: 2.0 * radius,
},
}
}
/// Create a square rectangle with sides of length len and top left corner at pos.
pub fn new_square<T: Into<Point>>(pos: T, len: Scalar) -> Rect {
let pos: Point = pos.into();
Rect {
pos: pos,
size: Size { w: len, h: len },
}
}
/// Returns the position of the left side of the rectangle.
pub fn left(&self) -> Scalar {
self.pos.x
}
/// Computes a rectangle whose perimeter forms the inside edge of margin with size m for self.
#[inline(always)]
pub fn margin(self, m: Scalar) -> Rect {
math::margin_rectangle(self.into(), m).into()
}
/// Computes a rectangle translated (slid) in the direction of the vector a distance relative
/// to the size of the rectangle. For example, self.relative([1.0, 1.0]) returns a rectangle
/// one rectangle to the right and down from the original.
#[inline(always)]
pub fn relative<T: Into<Point>>(self, v: T) -> Rect {
let v: Point = v.into();
Rect {
pos: Point {
x: self.pos.x + self.size.w * v.x,
y: self.pos.y + self.size.h * v.y,
},
size: self.size,
}
}
|
/// Computes a scaled rectangle with the same position as self.
pub fn scaled<T: Into<Size>>(self, v: T) -> Rect {
let v: Size = v.into();
Rect {
pos: self.pos,
size: self.size * v,
}
}
/// Returns the position of the top side of the rectangle.
pub fn top(&self) -> Scalar {
self.pos.y
}
}
|
/// Returns the position of the right side of the rectangle.
pub fn right(&self) -> Scalar {
self.pos.x + self.size.w
}
|
random_line_split
|
rect.rs
|
use std::convert::From;
use graphics::math::{ self, Scalar };
use { Point, Size };
/// A rectangle.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
/// The position of the top left corner of the rectangle.
pub pos: Point,
/// The width and height of the rectangle.
pub size: Size,
}
impl<P: Into<Point>, S: Into<Size>> From<(P, S)> for Rect {
/// Creates a rectangle from the position of its top left corner and its size.
fn from((pos, size): (P, S)) -> Rect {
let (pos, size): (Point, Size) = (pos.into(), size.into());
Rect { pos: pos, size: size }
}
}
impl From<Rect> for [Scalar; 4] {
fn from(rect: Rect) -> [Scalar; 4] {
[rect.pos.x, rect.pos.y, rect.size.w, rect.size.h]
}
}
impl From<[Scalar; 4]> for Rect {
/// Creates a rectangle from an array.
fn from(v: [Scalar; 4]) -> Rect {
Rect {
pos: Point { x: v[0], y: v[1] },
size: Size { w: v[2], h: v[3] },
}
}
}
impl From<(Scalar, Scalar, Scalar, Scalar)> for Rect {
fn from((x, y, w, h): (Scalar, Scalar, Scalar, Scalar)) -> Rect
|
}
impl Rect {
/// Returns the position of the bottom side of the rectangle.
pub fn bottom(&self) -> Scalar {
self.pos.y + self.size.h
}
/// Computes a rectangle with quadruple the surface area of self and with center
/// (self.x, self.y).
pub fn centered(self) -> Rect {
Rect {
pos: Point {
x: self.pos.x - self.size.w,
y: self.pos.y - self.size.h,
},
size: self.size * 2.0,
}
}
/// Compute whether or not the point is inside the rectangle.
#[inline(always)]
pub fn contains<T: Into<Point>>(&self, point: T) -> bool {
let point: Point = point.into();
self.left() < point.x && point.x < self.right() &&
self.top() < point.y && point.y < self.bottom()
}
/// Create a rectangle that circumscribes the given circle.
pub fn new_circle<T: Into<Point>>(center: T, radius: Scalar) -> Rect {
let center: Point = center.into();
Rect {
pos: Point {
x: center.x - radius,
y: center.y - radius,
},
size: Size {
w: 2.0 * radius,
h: 2.0 * radius,
},
}
}
/// Create a square rectangle with sides of length len and top left corner at pos.
pub fn new_square<T: Into<Point>>(pos: T, len: Scalar) -> Rect {
let pos: Point = pos.into();
Rect {
pos: pos,
size: Size { w: len, h: len },
}
}
/// Returns the position of the left side of the rectangle.
pub fn left(&self) -> Scalar {
self.pos.x
}
/// Computes a rectangle whose perimeter forms the inside edge of margin with size m for self.
#[inline(always)]
pub fn margin(self, m: Scalar) -> Rect {
math::margin_rectangle(self.into(), m).into()
}
/// Computes a rectangle translated (slid) in the direction of the vector a distance relative
/// to the size of the rectangle. For example, self.relative([1.0, 1.0]) returns a rectangle
/// one rectangle to the right and down from the original.
#[inline(always)]
pub fn relative<T: Into<Point>>(self, v: T) -> Rect {
let v: Point = v.into();
Rect {
pos: Point {
x: self.pos.x + self.size.w * v.x,
y: self.pos.y + self.size.h * v.y,
},
size: self.size,
}
}
/// Returns the position of the right side of the rectangle.
pub fn right(&self) -> Scalar {
self.pos.x + self.size.w
}
/// Computes a scaled rectangle with the same position as self.
pub fn scaled<T: Into<Size>>(self, v: T) -> Rect {
let v: Size = v.into();
Rect {
pos: self.pos,
size: self.size * v,
}
}
/// Returns the position of the top side of the rectangle.
pub fn top(&self) -> Scalar {
self.pos.y
}
}
|
{
Rect {
pos: Point { x: x, y: y },
size: Size { w: w, h: h },
}
}
|
identifier_body
|
rect.rs
|
use std::convert::From;
use graphics::math::{ self, Scalar };
use { Point, Size };
/// A rectangle.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
/// The position of the top left corner of the rectangle.
pub pos: Point,
/// The width and height of the rectangle.
pub size: Size,
}
impl<P: Into<Point>, S: Into<Size>> From<(P, S)> for Rect {
/// Creates a rectangle from the position of its top left corner and its size.
fn from((pos, size): (P, S)) -> Rect {
let (pos, size): (Point, Size) = (pos.into(), size.into());
Rect { pos: pos, size: size }
}
}
impl From<Rect> for [Scalar; 4] {
fn from(rect: Rect) -> [Scalar; 4] {
[rect.pos.x, rect.pos.y, rect.size.w, rect.size.h]
}
}
impl From<[Scalar; 4]> for Rect {
/// Creates a rectangle from an array.
fn
|
(v: [Scalar; 4]) -> Rect {
Rect {
pos: Point { x: v[0], y: v[1] },
size: Size { w: v[2], h: v[3] },
}
}
}
impl From<(Scalar, Scalar, Scalar, Scalar)> for Rect {
fn from((x, y, w, h): (Scalar, Scalar, Scalar, Scalar)) -> Rect {
Rect {
pos: Point { x: x, y: y },
size: Size { w: w, h: h },
}
}
}
impl Rect {
/// Returns the position of the bottom side of the rectangle.
pub fn bottom(&self) -> Scalar {
self.pos.y + self.size.h
}
/// Computes a rectangle with quadruple the surface area of self and with center
/// (self.x, self.y).
pub fn centered(self) -> Rect {
Rect {
pos: Point {
x: self.pos.x - self.size.w,
y: self.pos.y - self.size.h,
},
size: self.size * 2.0,
}
}
/// Compute whether or not the point is inside the rectangle.
#[inline(always)]
pub fn contains<T: Into<Point>>(&self, point: T) -> bool {
let point: Point = point.into();
self.left() < point.x && point.x < self.right() &&
self.top() < point.y && point.y < self.bottom()
}
/// Create a rectangle that circumscribes the given circle.
pub fn new_circle<T: Into<Point>>(center: T, radius: Scalar) -> Rect {
let center: Point = center.into();
Rect {
pos: Point {
x: center.x - radius,
y: center.y - radius,
},
size: Size {
w: 2.0 * radius,
h: 2.0 * radius,
},
}
}
/// Create a square rectangle with sides of length len and top left corner at pos.
pub fn new_square<T: Into<Point>>(pos: T, len: Scalar) -> Rect {
let pos: Point = pos.into();
Rect {
pos: pos,
size: Size { w: len, h: len },
}
}
/// Returns the position of the left side of the rectangle.
pub fn left(&self) -> Scalar {
self.pos.x
}
/// Computes a rectangle whose perimeter forms the inside edge of margin with size m for self.
#[inline(always)]
pub fn margin(self, m: Scalar) -> Rect {
math::margin_rectangle(self.into(), m).into()
}
/// Computes a rectangle translated (slid) in the direction of the vector a distance relative
/// to the size of the rectangle. For example, self.relative([1.0, 1.0]) returns a rectangle
/// one rectangle to the right and down from the original.
#[inline(always)]
pub fn relative<T: Into<Point>>(self, v: T) -> Rect {
let v: Point = v.into();
Rect {
pos: Point {
x: self.pos.x + self.size.w * v.x,
y: self.pos.y + self.size.h * v.y,
},
size: self.size,
}
}
/// Returns the position of the right side of the rectangle.
pub fn right(&self) -> Scalar {
self.pos.x + self.size.w
}
/// Computes a scaled rectangle with the same position as self.
pub fn scaled<T: Into<Size>>(self, v: T) -> Rect {
let v: Size = v.into();
Rect {
pos: self.pos,
size: self.size * v,
}
}
/// Returns the position of the top side of the rectangle.
pub fn top(&self) -> Scalar {
self.pos.y
}
}
|
from
|
identifier_name
|
token.rs
|
delim="#".repeat(n), string=ident_to_str(s))
}
/* Name components */
IDENT(s, _) => input.get(s.name).to_owned(),
LIFETIME(s) => format!("'{}", input.get(s.name)),
UNDERSCORE => ~"_",
/* Other */
DOC_COMMENT(ref s) => ident_to_str(s).to_owned(),
EOF => ~"<eof>",
INTERPOLATED(ref nt) => {
match nt {
&NtExpr(e) => ::print::pprust::expr_to_str(e, input),
&NtAttr(e) => ::print::pprust::attribute_to_str(e, input),
_ => {
~"an interpolated " +
match (*nt) {
NtItem(..) => ~"item",
NtBlock(..) => ~"block",
NtStmt(..) => ~"statement",
NtPat(..) => ~"pattern",
NtAttr(..) => fail!("should have been handled"),
NtExpr(..) => fail!("should have been handled above"),
NtTy(..) => ~"type",
NtIdent(..) => ~"identifier",
NtPath(..) => ~"path",
NtTT(..) => ~"tt",
NtMatchers(..) => ~"matcher sequence"
}
}
}
}
}
}
pub fn can_begin_expr(t: &Token) -> bool {
match *t {
LPAREN => true,
LBRACE => true,
LBRACKET => true,
IDENT(_, _) => true,
UNDERSCORE => true,
TILDE => true,
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
POUND => true,
AT => true,
NOT => true,
BINOP(MINUS) => true,
BINOP(STAR) => true,
BINOP(AND) => true,
BINOP(OR) => true, // in lambda syntax
OROR => true, // in lambda syntax
MOD_SEP => true,
INTERPOLATED(NtExpr(..))
| INTERPOLATED(NtIdent(..))
| INTERPOLATED(NtBlock(..))
| INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
/// what's the opposite delimiter?
pub fn flip_delimiter(t: &token::Token) -> token::Token {
match *t {
LPAREN => RPAREN,
LBRACE => RBRACE,
LBRACKET => RBRACKET,
RPAREN => LPAREN,
RBRACE => LBRACE,
RBRACKET => LBRACKET,
_ => fail!()
}
}
pub fn is_lit(t: &Token) -> bool {
match *t {
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
_ => false
}
}
pub fn is_ident(t: &Token) -> bool {
match *t { IDENT(_, _) => true, _ => false }
}
pub fn is_ident_or_path(t: &Token) -> bool {
match *t {
IDENT(_, _) | INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
pub fn is_plain_ident(t: &Token) -> bool {
match *t { IDENT(_, false) => true, _ => false }
}
pub fn is_bar(t: &Token) -> bool {
match *t { BINOP(OR) | OROR => true, _ => false }
}
// Get the first "argument"
macro_rules! first {
( $first:expr, $( $remainder:expr, )* ) => ( $first )
}
// Get the last "argument" (has to be done recursively to avoid phoney local ambiguity error)
macro_rules! last {
( $first:expr, $( $remainder:expr, )+ ) => ( last!( $( $remainder, )+ ) );
( $first:expr, ) => ( $first )
}
// In this macro, there is the requirement that the name (the number) must be monotonically
// increasing by one in the special identifiers, starting at 0; the same holds for the keywords,
// except starting from the next number instead of zero, and with the additional exception that
// special identifiers are *also* allowed (they are deduplicated in the important place, the
// interner), an exception which is demonstrated by "static" and "self".
macro_rules! declare_special_idents_and_keywords {(
// So now, in these rules, why is each definition parenthesised?
// Answer: otherwise we get a spurious local ambiguity bug on the "}"
pub mod special_idents {
$( ($si_name:expr, $si_static:ident, $si_str:expr); )*
}
pub mod keywords {
'strict:
$( ($sk_name:expr, $sk_variant:ident, $sk_str:expr); )*
'reserved:
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
) => {
static STRICT_KEYWORD_START: Name = first!($( $sk_name, )*);
static STRICT_KEYWORD_FINAL: Name = last!($( $sk_name, )*);
static RESERVED_KEYWORD_START: Name = first!($( $rk_name, )*);
static RESERVED_KEYWORD_FINAL: Name = last!($( $rk_name, )*);
pub mod special_idents {
use ast::Ident;
$( pub static $si_static: Ident = Ident { name: $si_name, ctxt: 0 }; )*
}
/**
* All the valid words that have meaning in the Rust language.
*
* Rust keywords are either'strict' or'reserved'. Strict keywords may not
* appear as identifiers at all. Reserved keywords are not used anywhere in
* the language and may not appear as identifiers.
*/
pub mod keywords {
use ast::Ident;
pub enum Keyword {
$( $sk_variant, )*
$( $rk_variant, )*
}
impl Keyword {
pub fn to_ident(&self) -> Ident {
match *self {
$( $sk_variant => Ident { name: $sk_name, ctxt: 0 }, )*
$( $rk_variant => Ident { name: $rk_name, ctxt: 0 }, )*
}
}
}
}
fn mk_fresh_ident_interner() -> @IdentInterner {
// The indices here must correspond to the numbers in
// special_idents, in Keyword to_ident(), and in static
// constants below.
let init_vec = ~[
$( $si_str, )*
$( $sk_str, )*
$( $rk_str, )*
];
@interner::StrInterner::prefill(init_vec)
}
}}
// If the special idents get renumbered, remember to modify these two as appropriate
static SELF_KEYWORD_NAME: Name = 3;
static STATIC_KEYWORD_NAME: Name = 10;
declare_special_idents_and_keywords! {
pub mod special_idents {
// These ones are statics
(0, anon, "anon");
(1, invalid, ""); // ''
(2, clownshoes_extensions, "__extensions__");
(super::SELF_KEYWORD_NAME, self_, "self"); //'self'
// for matcher NTs
(4, tt, "tt");
(5, matchers, "matchers");
// outside of libsyntax
(6, arg, "arg");
(7, clownshoe_abi, "__rust_abi");
(8, main, "main");
(9, opaque, "<opaque>");
(super::STATIC_KEYWORD_NAME, statik, "static");
(11, clownshoes_foreign_mod, "__foreign_mod__");
(12, unnamed_field, "<unnamed_field>");
(13, type_self, "Self"); // `Self`
}
pub mod keywords {
// These ones are variants of the Keyword enum
'strict:
(14, As, "as");
(15, Break, "break");
(16, Const, "const");
(17, Do, "do");
(18, Else, "else");
(19, Enum, "enum");
(20, Extern, "extern");
(21, False, "false");
(22, Fn, "fn");
(23, For, "for");
(24, If, "if");
(25, Impl, "impl");
(26, In, "in");
(27, Let, "let");
(28, __LogLevel, "__log_level");
(29, Loop, "loop");
(30, Match, "match");
(31, Mod, "mod");
(32, Mut, "mut");
(33, Once, "once");
(34, Priv, "priv");
(35, Pub, "pub");
(36, Ref, "ref");
(37, Return, "return");
// Static and Self are also special idents (prefill de-dupes)
(super::STATIC_KEYWORD_NAME, Static, "static");
(super::SELF_KEYWORD_NAME, Self, "self");
(38, Struct, "struct");
(39, Super, "super");
(40, True, "true");
(41, Trait, "trait");
(42, Type, "type");
(43, Unsafe, "unsafe");
(44, Use, "use");
(45, While, "while");
(46, Continue, "continue");
(47, Proc, "proc");
(48, Box, "box");
'reserved:
(49, Alignof, "alignof");
(50, Be, "be");
(51, Offsetof, "offsetof");
(52, Pure, "pure");
(53, Sizeof, "sizeof");
(54, Typeof, "typeof");
(55, Unsized, "unsized");
(56, Yield, "yield");
}
}
/**
* Maps a token to a record specifying the corresponding binary
* operator
*/
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
match *tok {
BINOP(STAR) => Some(ast::BiMul),
BINOP(SLASH) => Some(ast::BiDiv),
BINOP(PERCENT) => Some(ast::BiRem),
BINOP(PLUS) => Some(ast::BiAdd),
BINOP(MINUS) => Some(ast::BiSub),
BINOP(SHL) => Some(ast::BiShl),
BINOP(SHR) => Some(ast::BiShr),
BINOP(AND) => Some(ast::BiBitAnd),
BINOP(CARET) => Some(ast::BiBitXor),
BINOP(OR) => Some(ast::BiBitOr),
LT => Some(ast::BiLt),
LE => Some(ast::BiLe),
GE => Some(ast::BiGe),
GT => Some(ast::BiGt),
EQEQ => Some(ast::BiEq),
NE => Some(ast::BiNe),
ANDAND => Some(ast::BiAnd),
OROR => Some(ast::BiOr),
_ => None
}
}
// looks like we can get rid of this completely...
pub type IdentInterner = StrInterner;
// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @IdentInterner {
local_data_key!(key: @@::parse::token::IdentInterner)
match local_data::get(key, |k| k.map(|k| *k)) {
Some(interner) => *interner,
None => {
let interner = mk_fresh_ident_interner();
local_data::set(key, @interner);
interner
}
}
}
/* for when we don't care about the contents; doesn't interact with TLD or
serialization */
pub fn mk_fake_ident_interner() -> @IdentInterner {
@interner::StrInterner::new()
}
// maps a string to its interned representation
pub fn intern(str : &str) -> Name {
let interner = get_ident_interner();
interner.intern(str)
}
// gensyms a new uint, using the current interner
pub fn gensym(str : &str) -> Name {
let interner = get_ident_interner();
interner.gensym(str)
}
// map an interned representation back to a string
pub fn interner_get(name : Name) -> @str {
get_ident_interner().get(name)
}
// maps an identifier to the string that it corresponds to
pub fn ident_to_str(id : &ast::Ident) -> @str {
interner_get(id.name)
}
// maps a string to an identifier with an empty syntax context
pub fn str_to_ident(str : &str) -> ast::Ident {
ast::Ident::new(intern(str))
}
// maps a string to a gensym'ed identifier
pub fn gensym_ident(str : &str) -> ast::Ident {
ast::Ident::new(gensym(str))
}
// create a fresh name that maps to the same string as the old one.
// note that this guarantees that str_ptr_eq(ident_to_str(src),interner_get(fresh_name(src)));
// that is, that the new name and the old one are connected to ptr_eq strings.
pub fn fresh_name(src : &ast::Ident) -> Name {
let interner = get_ident_interner();
interner.gensym_copy(src.name)
// following: debug version. Could work in final except that it's incompatible with
// good error messages and uses of struct names in ambiguous could-be-binding
// locations. Also definitely destroys the guarantee given above about ptr_eq.
/*let num = rand::rng().gen_uint_range(0,0xffff);
gensym(format!("{}_{}",ident_to_str(src),num))*/
}
// it looks like there oughta be a str_ptr_eq fn, but no one bothered to implement it?
// determine whether two @str values are pointer-equal
pub fn str_ptr_eq(a : @str, b : @str) -> bool {
unsafe {
let p : uint = cast::transmute(a);
let q : uint = cast::transmute(b);
let result = p == q;
// got to transmute them back, to make sure the ref count is correct:
let _junk1 : @str = cast::transmute(p);
let _junk2 : @str = cast::transmute(q);
result
}
}
// return true when two identifiers refer (through the intern table) to the same ptr_eq
// string. This is used to compare identifiers in places where hygienic comparison is
// not wanted (i.e. not lexical vars).
pub fn ident_spelling_eq(a : &ast::Ident, b : &ast::Ident) -> bool {
str_ptr_eq(interner_get(a.name),interner_get(b.name))
}
// create a fresh mark.
pub fn fresh_mark() -> Mrk {
gensym("mark")
}
// See the macro above about the types of keywords
pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => { kw.to_ident().name == sid.name }
_ => { false }
}
}
pub fn is_any_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => match sid.name {
SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME |
STRICT_KEYWORD_START.. RESERVED_KEYWORD_FINAL => true,
_ => false,
},
_ => false
}
}
pub fn is_strict_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => match sid.name {
SELF_KEYWORD_NAME | STATIC_KEYWORD_NAME |
STRICT_KEYWORD_START.. STRICT_KEYWORD_FINAL => true,
_ => false,
},
_ => false,
}
}
pub fn is_reserved_keyword(tok: &Token) -> bool {
match *tok {
token::IDENT(sid, false) => match sid.name {
RESERVED_KEYWORD_START.. RESERVED_KEYWORD_FINAL => true,
_ => false,
},
_ => false,
}
}
pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&IDENT(id1,_),&IDENT(id2,_)) =>
ast_util::mtwt_resolve(id1) == ast_util::mtwt_resolve(id2),
_ => *t1 == *t2
}
}
#[cfg(test)]
mod test {
use super::*;
use ast;
use ast_util;
fn mark_ident(id : ast::Ident, m : ast::Mrk) -> ast::Ident {
ast::Ident{name:id.name,ctxt:ast_util::new_mark(m,id.ctxt)}
}
#[test] fn
|
mtwt_token_eq_test
|
identifier_name
|
|
token.rs
|
UNDERSCORE,
LIFETIME(ast::Ident),
/* For interpolation */
INTERPOLATED(Nonterminal),
DOC_COMMENT(ast::Ident),
EOF,
}
#[deriving(Clone, Encodable, Decodable, Eq, IterBytes)]
/// For interpolation during macro expansion.
pub enum Nonterminal {
NtItem(@ast::Item),
NtBlock(P<ast::Block>),
NtStmt(@ast::Stmt),
NtPat( @ast::Pat),
NtExpr(@ast::Expr),
NtTy( P<ast::Ty>),
NtIdent(~ast::Ident, bool),
NtAttr(@ast::Attribute), // #[foo]
NtPath(~ast::Path),
NtTT( @ast::TokenTree), // needs @ed to break a circularity
NtMatchers(~[ast::Matcher])
}
pub fn binop_to_str(o: BinOp) -> ~str {
match o {
PLUS => ~"+",
MINUS => ~"-",
STAR => ~"*",
SLASH => ~"/",
PERCENT => ~"%",
CARET => ~"^",
AND => ~"&",
OR => ~"|",
SHL => ~"<<",
SHR => ~">>"
}
}
pub fn to_str(input: @IdentInterner, t: &Token) -> ~str {
match *t {
EQ => ~"=",
LT => ~"<",
LE => ~"<=",
EQEQ => ~"==",
NE => ~"!=",
GE => ~">=",
GT => ~">",
NOT => ~"!",
TILDE => ~"~",
OROR => ~"||",
ANDAND => ~"&&",
BINOP(op) => binop_to_str(op),
BINOPEQ(op) => binop_to_str(op) + "=",
/* Structural symbols */
AT => ~"@",
DOT => ~".",
DOTDOT => ~"..",
DOTDOTDOT => ~"...",
COMMA => ~",",
SEMI => ~";",
COLON => ~":",
MOD_SEP => ~"::",
RARROW => ~"->",
LARROW => ~"<-",
DARROW => ~"<->",
FAT_ARROW => ~"=>",
LPAREN => ~"(",
RPAREN => ~")",
LBRACKET => ~"[",
RBRACKET => ~"]",
LBRACE => ~"{",
RBRACE => ~"}",
POUND => ~"#",
DOLLAR => ~"$",
/* Literals */
LIT_CHAR(c) => {
let mut res = ~"'";
char::from_u32(c).unwrap().escape_default(|c| {
res.push_char(c);
});
res.push_char('\'');
res
}
LIT_INT(i, t) => {
i.to_str() + ast_util::int_ty_to_str(t)
}
LIT_UINT(u, t) => {
u.to_str() + ast_util::uint_ty_to_str(t)
}
LIT_INT_UNSUFFIXED(i) => { i.to_str() }
LIT_FLOAT(ref s, t) => {
let mut body = ident_to_str(s).to_owned();
if body.ends_with(".") {
body.push_char('0'); // `10.f` is not a float literal
}
body + ast_util::float_ty_to_str(t)
}
LIT_FLOAT_UNSUFFIXED(ref s) => {
let mut body = ident_to_str(s).to_owned();
if body.ends_with(".") {
body.push_char('0'); // `10.f` is not a float literal
}
body
}
LIT_STR(ref s) => { format!("\"{}\"", ident_to_str(s).escape_default()) }
LIT_STR_RAW(ref s, n) => {
format!("r{delim}\"{string}\"{delim}",
delim="#".repeat(n), string=ident_to_str(s))
}
/* Name components */
IDENT(s, _) => input.get(s.name).to_owned(),
LIFETIME(s) => format!("'{}", input.get(s.name)),
UNDERSCORE => ~"_",
/* Other */
DOC_COMMENT(ref s) => ident_to_str(s).to_owned(),
EOF => ~"<eof>",
INTERPOLATED(ref nt) => {
match nt {
&NtExpr(e) => ::print::pprust::expr_to_str(e, input),
&NtAttr(e) => ::print::pprust::attribute_to_str(e, input),
_ => {
~"an interpolated " +
match (*nt) {
NtItem(..) => ~"item",
NtBlock(..) => ~"block",
NtStmt(..) => ~"statement",
NtPat(..) => ~"pattern",
NtAttr(..) => fail!("should have been handled"),
NtExpr(..) => fail!("should have been handled above"),
NtTy(..) => ~"type",
NtIdent(..) => ~"identifier",
NtPath(..) => ~"path",
NtTT(..) => ~"tt",
NtMatchers(..) => ~"matcher sequence"
}
}
}
}
}
}
pub fn can_begin_expr(t: &Token) -> bool {
match *t {
LPAREN => true,
LBRACE => true,
LBRACKET => true,
IDENT(_, _) => true,
UNDERSCORE => true,
TILDE => true,
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
POUND => true,
AT => true,
NOT => true,
BINOP(MINUS) => true,
BINOP(STAR) => true,
BINOP(AND) => true,
BINOP(OR) => true, // in lambda syntax
OROR => true, // in lambda syntax
MOD_SEP => true,
INTERPOLATED(NtExpr(..))
| INTERPOLATED(NtIdent(..))
| INTERPOLATED(NtBlock(..))
| INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
/// what's the opposite delimiter?
pub fn flip_delimiter(t: &token::Token) -> token::Token {
match *t {
LPAREN => RPAREN,
LBRACE => RBRACE,
LBRACKET => RBRACKET,
RPAREN => LPAREN,
RBRACE => LBRACE,
RBRACKET => LBRACKET,
_ => fail!()
}
}
pub fn is_lit(t: &Token) -> bool {
match *t {
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
_ => false
}
}
pub fn is_ident(t: &Token) -> bool {
match *t { IDENT(_, _) => true, _ => false }
}
pub fn is_ident_or_path(t: &Token) -> bool {
match *t {
IDENT(_, _) | INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
pub fn is_plain_ident(t: &Token) -> bool {
match *t { IDENT(_, false) => true, _ => false }
}
pub fn is_bar(t: &Token) -> bool {
match *t { BINOP(OR) | OROR => true, _ => false }
}
// Get the first "argument"
macro_rules! first {
( $first:expr, $( $remainder:expr, )* ) => ( $first )
}
// Get the last "argument" (has to be done recursively to avoid phoney local ambiguity error)
macro_rules! last {
( $first:expr, $( $remainder:expr, )+ ) => ( last!( $( $remainder, )+ ) );
( $first:expr, ) => ( $first )
}
// In this macro, there is the requirement that the name (the number) must be monotonically
// increasing by one in the special identifiers, starting at 0; the same holds for the keywords,
// except starting from the next number instead of zero, and with the additional exception that
// special identifiers are *also* allowed (they are deduplicated in the important place, the
// interner), an exception which is demonstrated by "static" and "self".
macro_rules! declare_special_idents_and_keywords {(
// So now, in these rules, why is each definition parenthesised?
// Answer: otherwise we get a spurious local ambiguity bug on the "}"
pub mod special_idents {
$( ($si_name:expr, $si_static:ident, $si_str:expr); )*
}
pub mod keywords {
'strict:
$( ($sk_name:expr, $sk_variant:ident, $sk_str:expr); )*
'reserved:
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
) => {
static STRICT_KEYWORD_START: Name = first!($( $sk_name, )*);
static STRICT_KEYWORD_FINAL: Name = last!($( $sk_name, )*);
static RESERVED_KEYWORD_START: Name = first!($( $rk_name, )*);
static RESERVED_KEYWORD_FINAL: Name = last!($( $rk_name, )*);
pub mod special_idents {
use ast::Ident;
$( pub static $si_static: Ident = Ident { name: $si_name, ctxt: 0 }; )*
}
/**
* All the valid words that have meaning in the Rust language.
*
* Rust keywords are either'strict' or'reserved'. Strict keywords may not
* appear as identifiers at all. Reserved keywords are not used anywhere in
* the language and may not appear as identifiers.
*/
pub mod keywords {
use ast::Ident;
pub enum Keyword {
$( $sk_variant, )*
$( $rk_variant, )*
}
|
impl Keyword {
pub fn to_ident(&self) -> Ident {
match *self {
$( $sk_variant => Ident { name: $sk_name, ctxt: 0 }, )*
$( $rk_variant => Ident { name: $rk_name, ctxt: 0 }, )*
}
}
}
}
fn mk_fresh_ident_interner() -> @IdentInterner {
// The indices here must correspond to the numbers in
// special_idents, in Keyword to_ident(), and in static
// constants below.
let init_vec = ~[
$( $si_str, )*
$( $sk_str, )*
$( $rk_str, )*
];
@interner::StrInterner::prefill(init_vec)
}
}}
// If the special idents get renumbered, remember to modify these two as appropriate
static SELF_KEYWORD_NAME: Name = 3;
static STATIC_KEYWORD_NAME: Name = 10;
declare_special_idents_and_keywords! {
pub mod special_idents {
// These ones are statics
(0, anon, "anon");
(1, invalid, ""); // ''
(2, clownshoes_extensions, "__extensions__");
(super::SELF_KEYWORD_NAME, self_, "self"); //'self'
// for matcher NTs
(4, tt, "tt");
(5, matchers, "matchers");
// outside of libsyntax
(6, arg, "arg");
(7, clownshoe_abi, "__rust_abi");
(8, main, "main");
(9, opaque, "<opaque>");
(super::STATIC_KEYWORD_NAME, statik, "static");
(11, clownshoes_foreign_mod, "__foreign_mod__");
(12, unnamed_field, "<unnamed_field>");
(13, type_self, "Self"); // `Self`
}
pub mod keywords {
// These ones are variants of the Keyword enum
'strict:
(14, As, "as");
(15, Break, "break");
(16, Const, "const");
(17, Do, "do");
(18, Else, "else");
(19, Enum, "enum");
(20, Extern, "extern");
(21, False, "false");
(22, Fn, "fn");
(23, For, "for");
(24, If, "if");
(25, Impl, "impl");
(26, In, "in");
(27, Let, "let");
(28, __LogLevel, "__log_level");
(29, Loop, "loop");
(30, Match, "match");
(31, Mod, "mod");
(32, Mut, "mut");
(33, Once, "once");
(34, Priv, "priv");
(35, Pub, "pub");
(36, Ref, "ref");
(37, Return, "return");
// Static and Self are also special idents (prefill de-dupes)
(super::STATIC_KEYWORD_NAME, Static, "static");
(super::SELF_KEYWORD_NAME, Self, "self");
(38, Struct, "struct");
(39, Super, "super");
(40, True, "true");
(41, Trait, "trait");
(42, Type, "type");
(43, Unsafe, "unsafe");
(44, Use, "use");
(45, While, "while");
(46, Continue, "continue");
(47, Proc, "proc");
(48, Box, "box");
'reserved:
(49, Alignof, "alignof");
(50, Be, "be");
(51, Offsetof, "offsetof");
(52, Pure, "pure");
(53, Sizeof, "sizeof");
(54, Typeof, "typeof");
(55, Unsized, "unsized");
(56, Yield, "yield");
}
}
/**
* Maps a token to a record specifying the corresponding binary
* operator
*/
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
match *tok {
BINOP(STAR) => Some(ast::BiMul),
BINOP(SLASH) => Some(ast::BiDiv),
BINOP(PERCENT) => Some(ast::BiRem),
BINOP(PLUS) => Some(ast::BiAdd),
BINOP(MINUS) => Some(ast::BiSub),
BINOP(SHL) => Some(ast::BiShl),
BINOP(SHR) => Some(ast::BiShr),
BINOP(AND) => Some(ast::BiBitAnd),
BINOP(CARET) => Some(ast::BiBitXor),
BINOP(OR) => Some(ast::BiBitOr),
LT => Some(ast::BiLt),
LE => Some(ast::BiLe),
GE => Some(ast::BiGe),
GT => Some(ast::BiGt),
EQEQ => Some(ast::BiEq),
NE => Some(ast::BiNe),
ANDAND => Some(ast::BiAnd),
OROR => Some(ast::BiOr),
_ => None
}
}
// looks like we can get rid of this completely...
pub type IdentInterner = StrInterner;
// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @IdentInterner {
local_data_key!(key: @@::parse::token::IdentInterner)
match local_data::get(key, |k| k.map(|k| *k)) {
Some(interner) => *interner,
None => {
let interner = mk_fresh_ident_interner();
local_data::set(key, @interner);
interner
}
}
}
/* for when we don't care about the contents; doesn't interact with TLD or
serialization */
pub fn mk_fake_ident_interner() -> @IdentInterner {
@interner::StrInterner::new()
}
// maps a string to its interned representation
pub fn intern(str : &str) -> Name {
let interner = get_ident_interner();
interner.intern(str)
}
// gensyms a new uint, using the current interner
pub fn gensym(str : &str) -> Name {
let interner = get_ident_interner();
interner.gensym(str)
}
// map an interned representation back to a string
pub fn interner_get(name : Name) -> @str {
get_ident_interner().get(name)
}
// maps an identifier to the string that it corresponds to
pub fn ident_to_str(id : &
|
random_line_split
|
|
token.rs
|
UNDERSCORE,
LIFETIME(ast::Ident),
/* For interpolation */
INTERPOLATED(Nonterminal),
DOC_COMMENT(ast::Ident),
EOF,
}
#[deriving(Clone, Encodable, Decodable, Eq, IterBytes)]
/// For interpolation during macro expansion.
pub enum Nonterminal {
NtItem(@ast::Item),
NtBlock(P<ast::Block>),
NtStmt(@ast::Stmt),
NtPat( @ast::Pat),
NtExpr(@ast::Expr),
NtTy( P<ast::Ty>),
NtIdent(~ast::Ident, bool),
NtAttr(@ast::Attribute), // #[foo]
NtPath(~ast::Path),
NtTT( @ast::TokenTree), // needs @ed to break a circularity
NtMatchers(~[ast::Matcher])
}
pub fn binop_to_str(o: BinOp) -> ~str {
match o {
PLUS => ~"+",
MINUS => ~"-",
STAR => ~"*",
SLASH => ~"/",
PERCENT => ~"%",
CARET => ~"^",
AND => ~"&",
OR => ~"|",
SHL => ~"<<",
SHR => ~">>"
}
}
pub fn to_str(input: @IdentInterner, t: &Token) -> ~str {
match *t {
EQ => ~"=",
LT => ~"<",
LE => ~"<=",
EQEQ => ~"==",
NE => ~"!=",
GE => ~">=",
GT => ~">",
NOT => ~"!",
TILDE => ~"~",
OROR => ~"||",
ANDAND => ~"&&",
BINOP(op) => binop_to_str(op),
BINOPEQ(op) => binop_to_str(op) + "=",
/* Structural symbols */
AT => ~"@",
DOT => ~".",
DOTDOT => ~"..",
DOTDOTDOT => ~"...",
COMMA => ~",",
SEMI => ~";",
COLON => ~":",
MOD_SEP => ~"::",
RARROW => ~"->",
LARROW => ~"<-",
DARROW => ~"<->",
FAT_ARROW => ~"=>",
LPAREN => ~"(",
RPAREN => ~")",
LBRACKET => ~"[",
RBRACKET => ~"]",
LBRACE => ~"{",
RBRACE => ~"}",
POUND => ~"#",
DOLLAR => ~"$",
/* Literals */
LIT_CHAR(c) => {
let mut res = ~"'";
char::from_u32(c).unwrap().escape_default(|c| {
res.push_char(c);
});
res.push_char('\'');
res
}
LIT_INT(i, t) => {
i.to_str() + ast_util::int_ty_to_str(t)
}
LIT_UINT(u, t) => {
u.to_str() + ast_util::uint_ty_to_str(t)
}
LIT_INT_UNSUFFIXED(i) => { i.to_str() }
LIT_FLOAT(ref s, t) => {
let mut body = ident_to_str(s).to_owned();
if body.ends_with(".") {
body.push_char('0'); // `10.f` is not a float literal
}
body + ast_util::float_ty_to_str(t)
}
LIT_FLOAT_UNSUFFIXED(ref s) => {
let mut body = ident_to_str(s).to_owned();
if body.ends_with(".") {
body.push_char('0'); // `10.f` is not a float literal
}
body
}
LIT_STR(ref s) => { format!("\"{}\"", ident_to_str(s).escape_default()) }
LIT_STR_RAW(ref s, n) => {
format!("r{delim}\"{string}\"{delim}",
delim="#".repeat(n), string=ident_to_str(s))
}
/* Name components */
IDENT(s, _) => input.get(s.name).to_owned(),
LIFETIME(s) => format!("'{}", input.get(s.name)),
UNDERSCORE => ~"_",
/* Other */
DOC_COMMENT(ref s) => ident_to_str(s).to_owned(),
EOF => ~"<eof>",
INTERPOLATED(ref nt) => {
match nt {
&NtExpr(e) => ::print::pprust::expr_to_str(e, input),
&NtAttr(e) => ::print::pprust::attribute_to_str(e, input),
_ => {
~"an interpolated " +
match (*nt) {
NtItem(..) => ~"item",
NtBlock(..) => ~"block",
NtStmt(..) => ~"statement",
NtPat(..) => ~"pattern",
NtAttr(..) => fail!("should have been handled"),
NtExpr(..) => fail!("should have been handled above"),
NtTy(..) => ~"type",
NtIdent(..) => ~"identifier",
NtPath(..) => ~"path",
NtTT(..) => ~"tt",
NtMatchers(..) => ~"matcher sequence"
}
}
}
}
}
}
pub fn can_begin_expr(t: &Token) -> bool {
match *t {
LPAREN => true,
LBRACE => true,
LBRACKET => true,
IDENT(_, _) => true,
UNDERSCORE => true,
TILDE => true,
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
POUND => true,
AT => true,
NOT => true,
BINOP(MINUS) => true,
BINOP(STAR) => true,
BINOP(AND) => true,
BINOP(OR) => true, // in lambda syntax
OROR => true, // in lambda syntax
MOD_SEP => true,
INTERPOLATED(NtExpr(..))
| INTERPOLATED(NtIdent(..))
| INTERPOLATED(NtBlock(..))
| INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
/// what's the opposite delimiter?
pub fn flip_delimiter(t: &token::Token) -> token::Token {
match *t {
LPAREN => RPAREN,
LBRACE => RBRACE,
LBRACKET => RBRACKET,
RPAREN => LPAREN,
RBRACE => LBRACE,
RBRACKET => LBRACKET,
_ => fail!()
}
}
pub fn is_lit(t: &Token) -> bool {
match *t {
LIT_CHAR(_) => true,
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
LIT_INT_UNSUFFIXED(_) => true,
LIT_FLOAT(_, _) => true,
LIT_FLOAT_UNSUFFIXED(_) => true,
LIT_STR(_) => true,
LIT_STR_RAW(_, _) => true,
_ => false
}
}
pub fn is_ident(t: &Token) -> bool {
match *t { IDENT(_, _) => true, _ => false }
}
pub fn is_ident_or_path(t: &Token) -> bool {
match *t {
IDENT(_, _) | INTERPOLATED(NtPath(..)) => true,
_ => false
}
}
pub fn is_plain_ident(t: &Token) -> bool {
match *t { IDENT(_, false) => true, _ => false }
}
pub fn is_bar(t: &Token) -> bool {
match *t { BINOP(OR) | OROR => true, _ => false }
}
// Get the first "argument"
macro_rules! first {
( $first:expr, $( $remainder:expr, )* ) => ( $first )
}
// Get the last "argument" (has to be done recursively to avoid phoney local ambiguity error)
macro_rules! last {
( $first:expr, $( $remainder:expr, )+ ) => ( last!( $( $remainder, )+ ) );
( $first:expr, ) => ( $first )
}
// In this macro, there is the requirement that the name (the number) must be monotonically
// increasing by one in the special identifiers, starting at 0; the same holds for the keywords,
// except starting from the next number instead of zero, and with the additional exception that
// special identifiers are *also* allowed (they are deduplicated in the important place, the
// interner), an exception which is demonstrated by "static" and "self".
macro_rules! declare_special_idents_and_keywords {(
// So now, in these rules, why is each definition parenthesised?
// Answer: otherwise we get a spurious local ambiguity bug on the "}"
pub mod special_idents {
$( ($si_name:expr, $si_static:ident, $si_str:expr); )*
}
pub mod keywords {
'strict:
$( ($sk_name:expr, $sk_variant:ident, $sk_str:expr); )*
'reserved:
$( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )*
}
) => {
static STRICT_KEYWORD_START: Name = first!($( $sk_name, )*);
static STRICT_KEYWORD_FINAL: Name = last!($( $sk_name, )*);
static RESERVED_KEYWORD_START: Name = first!($( $rk_name, )*);
static RESERVED_KEYWORD_FINAL: Name = last!($( $rk_name, )*);
pub mod special_idents {
use ast::Ident;
$( pub static $si_static: Ident = Ident { name: $si_name, ctxt: 0 }; )*
}
/**
* All the valid words that have meaning in the Rust language.
*
* Rust keywords are either'strict' or'reserved'. Strict keywords may not
* appear as identifiers at all. Reserved keywords are not used anywhere in
* the language and may not appear as identifiers.
*/
pub mod keywords {
use ast::Ident;
pub enum Keyword {
$( $sk_variant, )*
$( $rk_variant, )*
}
impl Keyword {
pub fn to_ident(&self) -> Ident {
match *self {
$( $sk_variant => Ident { name: $sk_name, ctxt: 0 }, )*
$( $rk_variant => Ident { name: $rk_name, ctxt: 0 }, )*
}
}
}
}
fn mk_fresh_ident_interner() -> @IdentInterner {
// The indices here must correspond to the numbers in
// special_idents, in Keyword to_ident(), and in static
// constants below.
let init_vec = ~[
$( $si_str, )*
$( $sk_str, )*
$( $rk_str, )*
];
@interner::StrInterner::prefill(init_vec)
}
}}
// If the special idents get renumbered, remember to modify these two as appropriate
static SELF_KEYWORD_NAME: Name = 3;
static STATIC_KEYWORD_NAME: Name = 10;
declare_special_idents_and_keywords! {
pub mod special_idents {
// These ones are statics
(0, anon, "anon");
(1, invalid, ""); // ''
(2, clownshoes_extensions, "__extensions__");
(super::SELF_KEYWORD_NAME, self_, "self"); //'self'
// for matcher NTs
(4, tt, "tt");
(5, matchers, "matchers");
// outside of libsyntax
(6, arg, "arg");
(7, clownshoe_abi, "__rust_abi");
(8, main, "main");
(9, opaque, "<opaque>");
(super::STATIC_KEYWORD_NAME, statik, "static");
(11, clownshoes_foreign_mod, "__foreign_mod__");
(12, unnamed_field, "<unnamed_field>");
(13, type_self, "Self"); // `Self`
}
pub mod keywords {
// These ones are variants of the Keyword enum
'strict:
(14, As, "as");
(15, Break, "break");
(16, Const, "const");
(17, Do, "do");
(18, Else, "else");
(19, Enum, "enum");
(20, Extern, "extern");
(21, False, "false");
(22, Fn, "fn");
(23, For, "for");
(24, If, "if");
(25, Impl, "impl");
(26, In, "in");
(27, Let, "let");
(28, __LogLevel, "__log_level");
(29, Loop, "loop");
(30, Match, "match");
(31, Mod, "mod");
(32, Mut, "mut");
(33, Once, "once");
(34, Priv, "priv");
(35, Pub, "pub");
(36, Ref, "ref");
(37, Return, "return");
// Static and Self are also special idents (prefill de-dupes)
(super::STATIC_KEYWORD_NAME, Static, "static");
(super::SELF_KEYWORD_NAME, Self, "self");
(38, Struct, "struct");
(39, Super, "super");
(40, True, "true");
(41, Trait, "trait");
(42, Type, "type");
(43, Unsafe, "unsafe");
(44, Use, "use");
(45, While, "while");
(46, Continue, "continue");
(47, Proc, "proc");
(48, Box, "box");
'reserved:
(49, Alignof, "alignof");
(50, Be, "be");
(51, Offsetof, "offsetof");
(52, Pure, "pure");
(53, Sizeof, "sizeof");
(54, Typeof, "typeof");
(55, Unsized, "unsized");
(56, Yield, "yield");
}
}
/**
* Maps a token to a record specifying the corresponding binary
* operator
*/
pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
match *tok {
BINOP(STAR) => Some(ast::BiMul),
BINOP(SLASH) => Some(ast::BiDiv),
BINOP(PERCENT) => Some(ast::BiRem),
BINOP(PLUS) => Some(ast::BiAdd),
BINOP(MINUS) => Some(ast::BiSub),
BINOP(SHL) => Some(ast::BiShl),
BINOP(SHR) => Some(ast::BiShr),
BINOP(AND) => Some(ast::BiBitAnd),
BINOP(CARET) => Some(ast::BiBitXor),
BINOP(OR) => Some(ast::BiBitOr),
LT => Some(ast::BiLt),
LE => Some(ast::BiLe),
GE => Some(ast::BiGe),
GT => Some(ast::BiGt),
EQEQ => Some(ast::BiEq),
NE => Some(ast::BiNe),
ANDAND => Some(ast::BiAnd),
OROR => Some(ast::BiOr),
_ => None
}
}
// looks like we can get rid of this completely...
pub type IdentInterner = StrInterner;
// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @IdentInterner {
local_data_key!(key: @@::parse::token::IdentInterner)
match local_data::get(key, |k| k.map(|k| *k)) {
Some(interner) => *interner,
None => {
let interner = mk_fresh_ident_interner();
local_data::set(key, @interner);
interner
}
}
}
/* for when we don't care about the contents; doesn't interact with TLD or
serialization */
pub fn mk_fake_ident_interner() -> @IdentInterner
|
// maps a string to its interned representation
pub fn intern(str : &str) -> Name {
let interner = get_ident_interner();
interner.intern(str)
}
// gensyms a new uint, using the current interner
pub fn gensym(str : &str) -> Name {
let interner = get_ident_interner();
interner.gensym(str)
}
// map an interned representation back to a string
pub fn interner_get(name : Name) -> @str {
get_ident_interner().get(name)
}
// maps an identifier to the string that it corresponds to
pub fn ident_to_str(id
|
{
@interner::StrInterner::new()
}
|
identifier_body
|
mod.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
pub mod field_eq;
pub mod field_exists;
pub mod field_grep;
pub mod field_gt;
|
pub mod field_path;
pub mod field_predicate;
pub mod version;
|
pub mod field_isempty;
pub mod field_istype;
pub mod field_lt;
|
random_line_split
|
dynamic_lib.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Dynamic library facilities.
A simple wrapper over the platform's dynamic library facilities
*/
#![experimental]
#![allow(missing_docs)]
use clone::Clone;
use c_str::ToCStr;
use iter::Iterator;
use mem;
use ops::*;
use option::*;
use os;
use path::{Path,GenericPath};
use result::*;
use slice::{AsSlice,SlicePrelude};
use str;
use string::String;
use vec::Vec;
pub struct DynamicLibrary { handle: *mut u8 }
impl Drop for DynamicLibrary {
fn drop(&mut self) {
match dl::check_for_errors_in(|| {
unsafe {
dl::close(self.handle)
}
}) {
Ok(()) => {},
Err(str) => panic!("{}", str)
}
}
}
impl DynamicLibrary {
// FIXME (#12938): Until DST lands, we cannot decompose &str into
// & and str, so we cannot usefully take ToCStr arguments by
// reference (without forcing an additional & around &str). So we
// are instead temporarily adding an instance for &Path, so that
// we can take ToCStr as owned. When DST lands, the &Path instance
// should be removed, and arguments bound by ToCStr should be
// passed by reference. (Here: in the `open` method.)
/// Lazily open a dynamic library. When passed None it gives a
/// handle to the calling process
pub fn open<T: ToCStr>(filename: Option<T>)
-> Result<DynamicLibrary, String> {
unsafe {
let mut filename = filename;
let maybe_library = dl::check_for_errors_in(|| {
match filename.take() {
Some(name) => dl::open_external(name),
None => dl::open_internal()
}
});
// The dynamic library must not be constructed if there is
// an error opening the library so the destructor does not
// run.
match maybe_library {
Err(err) => Err(err),
Ok(handle) => Ok(DynamicLibrary { handle: handle })
}
}
}
/// Prepends a path to this process's search path for dynamic libraries
pub fn prepend_search_path(path: &Path) {
let mut search_path = DynamicLibrary::search_path();
search_path.insert(0, path.clone());
let newval = DynamicLibrary::create_path(search_path.as_slice());
os::setenv(DynamicLibrary::envvar(),
str::from_utf8(newval.as_slice()).unwrap());
}
/// From a slice of paths, create a new vector which is suitable to be an
/// environment variable for this platforms dylib search path.
pub fn create_path(path: &[Path]) -> Vec<u8> {
let mut newvar = Vec::new();
for (i, path) in path.iter().enumerate() {
if i > 0 { newvar.push(DynamicLibrary::separator()); }
newvar.push_all(path.as_vec());
}
return newvar;
}
/// Returns the environment variable for this process's dynamic library
/// search path
pub fn envvar() -> &'static str {
if cfg!(windows) {
"PATH"
} else if cfg!(target_os = "macos") {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
fn separator() -> u8 {
if cfg!(windows) {b';'} else {b':'}
}
/// Returns the current search path for dynamic libraries being used by this
/// process
pub fn search_path() -> Vec<Path> {
let mut ret = Vec::new();
match os::getenv_as_bytes(DynamicLibrary::envvar()) {
Some(env) => {
for portion in
env.as_slice()
.split(|a| *a == DynamicLibrary::separator()) {
ret.push(Path::new(portion));
}
}
None => {}
}
return ret;
}
/// Access the value at the symbol of the dynamic library
pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
// This function should have a lifetime constraint of 'a on
// T but that feature is still unimplemented
let maybe_symbol_value = dl::check_for_errors_in(|| {
symbol.with_c_str(|raw_string| {
dl::symbol(self.handle, raw_string)
})
});
// The value must not be constructed if there is an error so
// the destructor does not run.
match maybe_symbol_value {
Err(err) => Err(err),
Ok(symbol_value) => Ok(mem::transmute(symbol_value))
}
}
}
#[cfg(all(test, not(target_os = "ios")))]
mod test {
use super::*;
use prelude::*;
use libc;
use mem;
#[test]
#[cfg_attr(any(windows, target_os = "android"), ignore)] // FIXME #8818, #10379
fn test_loading_cosine() {
// The math library does not need to be loaded since it is already
// statically linked in
let none: Option<Path> = None; // appease the typechecker
let libm = match DynamicLibrary::open(none) {
Err(error) => panic!("Could not load self as module: {}", error),
Ok(libm) => libm
};
let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
match libm.symbol("cos") {
Err(error) => panic!("Could not load function cos: {}", error),
Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
}
};
let argument = 0.0;
let expected_result = 1.0;
let result = cosine(argument);
if result!= expected_result {
panic!("cos({})!= {} but equaled {} instead", argument,
expected_result, result)
}
}
#[test]
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly"))]
fn test_errors_do_not_crash() {
// Open /dev/null as a library to get an error, and make sure
// that only causes an error, and not a crash.
let path = Path::new("/dev/null");
match DynamicLibrary::open(Some(&path)) {
Err(_) => {}
Ok(_) => panic!("Successfully opened the empty library.")
}
}
}
#[cfg(any(target_os = "linux",
target_os = "android",
target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly"))]
pub mod dl {
use c_str::{CString, ToCStr};
use libc;
use ptr;
use result::*;
use string::String;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
filename.with_c_str(|raw_name| {
dlopen(raw_name, Lazy as libc::c_int) as *mut u8
})
}
pub unsafe fn open_internal() -> *mut u8 {
dlopen(ptr::null(), Lazy as libc::c_int) as *mut u8
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence
let _guard = LOCK.lock();
let _old_error = dlerror();
let result = f();
let last_error = dlerror() as *const _;
let ret = if ptr::null() == last_error {
Ok(result)
} else {
Err(String::from_str(CString::new(last_error, false).as_str()
.unwrap()))
};
ret
}
}
pub unsafe fn symbol(handle: *mut u8,
symbol: *const libc::c_char) -> *mut u8 {
dlsym(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
dlclose(handle as *mut libc::c_void); ()
}
pub enum Rtld {
Lazy = 1,
Now = 2,
Global = 256,
Local = 0,
}
#[link_name = "dl"]
extern {
fn dlopen(filename: *const libc::c_char,
flag: libc::c_int) -> *mut libc::c_void;
fn dlerror() -> *mut libc::c_char;
fn dlsym(handle: *mut libc::c_void,
symbol: *const libc::c_char) -> *mut libc::c_void;
fn dlclose(handle: *mut libc::c_void) -> libc::c_int;
}
}
#[cfg(target_os = "windows")]
pub mod dl {
use c_str::ToCStr;
use iter::Iterator;
use libc;
use os;
use ptr;
use result::{Ok, Err, Result};
use slice::SlicePrelude;
use str::StrPrelude;
use str;
use string::String;
use vec::Vec;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
// Windows expects Unicode data
let filename_cstr = filename.to_c_str();
let filename_str = str::from_utf8(filename_cstr.as_bytes_no_nul()).unwrap();
let mut filename_str: Vec<u16> = filename_str.utf16_units().collect();
filename_str.push(0);
LoadLibraryW(filename_str.as_ptr() as *const libc::c_void) as *mut u8
}
pub unsafe fn open_internal() -> *mut u8
|
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
unsafe {
SetLastError(0);
let result = f();
let error = os::errno();
if 0 == error {
Ok(result)
} else {
Err(format!("Error code {}", error))
}
}
}
pub unsafe fn symbol(handle: *mut u8, symbol: *const libc::c_char) -> *mut u8 {
GetProcAddress(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
FreeLibrary(handle as *mut libc::c_void); ()
}
#[allow(non_snake_case)]
extern "system" {
fn SetLastError(error: libc::size_t);
fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void;
fn GetModuleHandleExW(dwFlags: libc::DWORD, name: *const u16,
handle: *mut *mut libc::c_void)
-> *mut libc::c_void;
fn GetProcAddress(handle: *mut libc::c_void,
name: *const libc::c_char) -> *mut libc::c_void;
fn FreeLibrary(handle: *mut libc::c_void);
}
}
|
{
let mut handle = ptr::null_mut();
GetModuleHandleExW(0 as libc::DWORD, ptr::null(), &mut handle);
handle as *mut u8
}
|
identifier_body
|
dynamic_lib.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Dynamic library facilities.
A simple wrapper over the platform's dynamic library facilities
*/
#![experimental]
#![allow(missing_docs)]
use clone::Clone;
use c_str::ToCStr;
use iter::Iterator;
use mem;
use ops::*;
use option::*;
use os;
use path::{Path,GenericPath};
use result::*;
use slice::{AsSlice,SlicePrelude};
use str;
use string::String;
use vec::Vec;
pub struct DynamicLibrary { handle: *mut u8 }
impl Drop for DynamicLibrary {
fn drop(&mut self) {
match dl::check_for_errors_in(|| {
unsafe {
dl::close(self.handle)
}
}) {
Ok(()) => {},
Err(str) => panic!("{}", str)
}
}
}
impl DynamicLibrary {
// FIXME (#12938): Until DST lands, we cannot decompose &str into
// & and str, so we cannot usefully take ToCStr arguments by
// reference (without forcing an additional & around &str). So we
// are instead temporarily adding an instance for &Path, so that
// we can take ToCStr as owned. When DST lands, the &Path instance
// should be removed, and arguments bound by ToCStr should be
// passed by reference. (Here: in the `open` method.)
/// Lazily open a dynamic library. When passed None it gives a
/// handle to the calling process
pub fn open<T: ToCStr>(filename: Option<T>)
-> Result<DynamicLibrary, String> {
unsafe {
let mut filename = filename;
let maybe_library = dl::check_for_errors_in(|| {
match filename.take() {
Some(name) => dl::open_external(name),
None => dl::open_internal()
}
});
// The dynamic library must not be constructed if there is
// an error opening the library so the destructor does not
// run.
match maybe_library {
Err(err) => Err(err),
Ok(handle) => Ok(DynamicLibrary { handle: handle })
}
}
}
/// Prepends a path to this process's search path for dynamic libraries
pub fn prepend_search_path(path: &Path) {
let mut search_path = DynamicLibrary::search_path();
search_path.insert(0, path.clone());
let newval = DynamicLibrary::create_path(search_path.as_slice());
os::setenv(DynamicLibrary::envvar(),
str::from_utf8(newval.as_slice()).unwrap());
}
/// From a slice of paths, create a new vector which is suitable to be an
/// environment variable for this platforms dylib search path.
pub fn create_path(path: &[Path]) -> Vec<u8> {
let mut newvar = Vec::new();
for (i, path) in path.iter().enumerate() {
if i > 0 { newvar.push(DynamicLibrary::separator()); }
newvar.push_all(path.as_vec());
}
return newvar;
}
/// Returns the environment variable for this process's dynamic library
/// search path
pub fn envvar() -> &'static str {
if cfg!(windows) {
"PATH"
} else if cfg!(target_os = "macos") {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
fn separator() -> u8 {
if cfg!(windows) {b';'} else {b':'}
}
/// Returns the current search path for dynamic libraries being used by this
/// process
pub fn search_path() -> Vec<Path> {
let mut ret = Vec::new();
match os::getenv_as_bytes(DynamicLibrary::envvar()) {
Some(env) => {
for portion in
env.as_slice()
.split(|a| *a == DynamicLibrary::separator()) {
ret.push(Path::new(portion));
}
}
None => {}
}
return ret;
}
/// Access the value at the symbol of the dynamic library
pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
// This function should have a lifetime constraint of 'a on
// T but that feature is still unimplemented
let maybe_symbol_value = dl::check_for_errors_in(|| {
symbol.with_c_str(|raw_string| {
dl::symbol(self.handle, raw_string)
})
});
// The value must not be constructed if there is an error so
// the destructor does not run.
match maybe_symbol_value {
Err(err) => Err(err),
Ok(symbol_value) => Ok(mem::transmute(symbol_value))
}
}
}
#[cfg(all(test, not(target_os = "ios")))]
mod test {
use super::*;
use prelude::*;
use libc;
use mem;
#[test]
#[cfg_attr(any(windows, target_os = "android"), ignore)] // FIXME #8818, #10379
fn test_loading_cosine() {
// The math library does not need to be loaded since it is already
// statically linked in
let none: Option<Path> = None; // appease the typechecker
let libm = match DynamicLibrary::open(none) {
Err(error) => panic!("Could not load self as module: {}", error),
Ok(libm) => libm
};
let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
match libm.symbol("cos") {
Err(error) => panic!("Could not load function cos: {}", error),
Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
}
};
let argument = 0.0;
|
if result!= expected_result {
panic!("cos({})!= {} but equaled {} instead", argument,
expected_result, result)
}
}
#[test]
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly"))]
fn test_errors_do_not_crash() {
// Open /dev/null as a library to get an error, and make sure
// that only causes an error, and not a crash.
let path = Path::new("/dev/null");
match DynamicLibrary::open(Some(&path)) {
Err(_) => {}
Ok(_) => panic!("Successfully opened the empty library.")
}
}
}
#[cfg(any(target_os = "linux",
target_os = "android",
target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly"))]
pub mod dl {
use c_str::{CString, ToCStr};
use libc;
use ptr;
use result::*;
use string::String;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
filename.with_c_str(|raw_name| {
dlopen(raw_name, Lazy as libc::c_int) as *mut u8
})
}
pub unsafe fn open_internal() -> *mut u8 {
dlopen(ptr::null(), Lazy as libc::c_int) as *mut u8
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence
let _guard = LOCK.lock();
let _old_error = dlerror();
let result = f();
let last_error = dlerror() as *const _;
let ret = if ptr::null() == last_error {
Ok(result)
} else {
Err(String::from_str(CString::new(last_error, false).as_str()
.unwrap()))
};
ret
}
}
pub unsafe fn symbol(handle: *mut u8,
symbol: *const libc::c_char) -> *mut u8 {
dlsym(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
dlclose(handle as *mut libc::c_void); ()
}
pub enum Rtld {
Lazy = 1,
Now = 2,
Global = 256,
Local = 0,
}
#[link_name = "dl"]
extern {
fn dlopen(filename: *const libc::c_char,
flag: libc::c_int) -> *mut libc::c_void;
fn dlerror() -> *mut libc::c_char;
fn dlsym(handle: *mut libc::c_void,
symbol: *const libc::c_char) -> *mut libc::c_void;
fn dlclose(handle: *mut libc::c_void) -> libc::c_int;
}
}
#[cfg(target_os = "windows")]
pub mod dl {
use c_str::ToCStr;
use iter::Iterator;
use libc;
use os;
use ptr;
use result::{Ok, Err, Result};
use slice::SlicePrelude;
use str::StrPrelude;
use str;
use string::String;
use vec::Vec;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
// Windows expects Unicode data
let filename_cstr = filename.to_c_str();
let filename_str = str::from_utf8(filename_cstr.as_bytes_no_nul()).unwrap();
let mut filename_str: Vec<u16> = filename_str.utf16_units().collect();
filename_str.push(0);
LoadLibraryW(filename_str.as_ptr() as *const libc::c_void) as *mut u8
}
pub unsafe fn open_internal() -> *mut u8 {
let mut handle = ptr::null_mut();
GetModuleHandleExW(0 as libc::DWORD, ptr::null(), &mut handle);
handle as *mut u8
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
unsafe {
SetLastError(0);
let result = f();
let error = os::errno();
if 0 == error {
Ok(result)
} else {
Err(format!("Error code {}", error))
}
}
}
pub unsafe fn symbol(handle: *mut u8, symbol: *const libc::c_char) -> *mut u8 {
GetProcAddress(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
FreeLibrary(handle as *mut libc::c_void); ()
}
#[allow(non_snake_case)]
extern "system" {
fn SetLastError(error: libc::size_t);
fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void;
fn GetModuleHandleExW(dwFlags: libc::DWORD, name: *const u16,
handle: *mut *mut libc::c_void)
-> *mut libc::c_void;
fn GetProcAddress(handle: *mut libc::c_void,
name: *const libc::c_char) -> *mut libc::c_void;
fn FreeLibrary(handle: *mut libc::c_void);
}
}
|
let expected_result = 1.0;
let result = cosine(argument);
|
random_line_split
|
dynamic_lib.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Dynamic library facilities.
A simple wrapper over the platform's dynamic library facilities
*/
#![experimental]
#![allow(missing_docs)]
use clone::Clone;
use c_str::ToCStr;
use iter::Iterator;
use mem;
use ops::*;
use option::*;
use os;
use path::{Path,GenericPath};
use result::*;
use slice::{AsSlice,SlicePrelude};
use str;
use string::String;
use vec::Vec;
pub struct DynamicLibrary { handle: *mut u8 }
impl Drop for DynamicLibrary {
fn drop(&mut self) {
match dl::check_for_errors_in(|| {
unsafe {
dl::close(self.handle)
}
}) {
Ok(()) => {},
Err(str) => panic!("{}", str)
}
}
}
impl DynamicLibrary {
// FIXME (#12938): Until DST lands, we cannot decompose &str into
// & and str, so we cannot usefully take ToCStr arguments by
// reference (without forcing an additional & around &str). So we
// are instead temporarily adding an instance for &Path, so that
// we can take ToCStr as owned. When DST lands, the &Path instance
// should be removed, and arguments bound by ToCStr should be
// passed by reference. (Here: in the `open` method.)
/// Lazily open a dynamic library. When passed None it gives a
/// handle to the calling process
pub fn open<T: ToCStr>(filename: Option<T>)
-> Result<DynamicLibrary, String> {
unsafe {
let mut filename = filename;
let maybe_library = dl::check_for_errors_in(|| {
match filename.take() {
Some(name) => dl::open_external(name),
None => dl::open_internal()
}
});
// The dynamic library must not be constructed if there is
// an error opening the library so the destructor does not
// run.
match maybe_library {
Err(err) => Err(err),
Ok(handle) => Ok(DynamicLibrary { handle: handle })
}
}
}
/// Prepends a path to this process's search path for dynamic libraries
pub fn prepend_search_path(path: &Path) {
let mut search_path = DynamicLibrary::search_path();
search_path.insert(0, path.clone());
let newval = DynamicLibrary::create_path(search_path.as_slice());
os::setenv(DynamicLibrary::envvar(),
str::from_utf8(newval.as_slice()).unwrap());
}
/// From a slice of paths, create a new vector which is suitable to be an
/// environment variable for this platforms dylib search path.
pub fn
|
(path: &[Path]) -> Vec<u8> {
let mut newvar = Vec::new();
for (i, path) in path.iter().enumerate() {
if i > 0 { newvar.push(DynamicLibrary::separator()); }
newvar.push_all(path.as_vec());
}
return newvar;
}
/// Returns the environment variable for this process's dynamic library
/// search path
pub fn envvar() -> &'static str {
if cfg!(windows) {
"PATH"
} else if cfg!(target_os = "macos") {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
fn separator() -> u8 {
if cfg!(windows) {b';'} else {b':'}
}
/// Returns the current search path for dynamic libraries being used by this
/// process
pub fn search_path() -> Vec<Path> {
let mut ret = Vec::new();
match os::getenv_as_bytes(DynamicLibrary::envvar()) {
Some(env) => {
for portion in
env.as_slice()
.split(|a| *a == DynamicLibrary::separator()) {
ret.push(Path::new(portion));
}
}
None => {}
}
return ret;
}
/// Access the value at the symbol of the dynamic library
pub unsafe fn symbol<T>(&self, symbol: &str) -> Result<*mut T, String> {
// This function should have a lifetime constraint of 'a on
// T but that feature is still unimplemented
let maybe_symbol_value = dl::check_for_errors_in(|| {
symbol.with_c_str(|raw_string| {
dl::symbol(self.handle, raw_string)
})
});
// The value must not be constructed if there is an error so
// the destructor does not run.
match maybe_symbol_value {
Err(err) => Err(err),
Ok(symbol_value) => Ok(mem::transmute(symbol_value))
}
}
}
#[cfg(all(test, not(target_os = "ios")))]
mod test {
use super::*;
use prelude::*;
use libc;
use mem;
#[test]
#[cfg_attr(any(windows, target_os = "android"), ignore)] // FIXME #8818, #10379
fn test_loading_cosine() {
// The math library does not need to be loaded since it is already
// statically linked in
let none: Option<Path> = None; // appease the typechecker
let libm = match DynamicLibrary::open(none) {
Err(error) => panic!("Could not load self as module: {}", error),
Ok(libm) => libm
};
let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe {
match libm.symbol("cos") {
Err(error) => panic!("Could not load function cos: {}", error),
Ok(cosine) => mem::transmute::<*mut u8, _>(cosine)
}
};
let argument = 0.0;
let expected_result = 1.0;
let result = cosine(argument);
if result!= expected_result {
panic!("cos({})!= {} but equaled {} instead", argument,
expected_result, result)
}
}
#[test]
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly"))]
fn test_errors_do_not_crash() {
// Open /dev/null as a library to get an error, and make sure
// that only causes an error, and not a crash.
let path = Path::new("/dev/null");
match DynamicLibrary::open(Some(&path)) {
Err(_) => {}
Ok(_) => panic!("Successfully opened the empty library.")
}
}
}
#[cfg(any(target_os = "linux",
target_os = "android",
target_os = "macos",
target_os = "ios",
target_os = "freebsd",
target_os = "dragonfly"))]
pub mod dl {
use c_str::{CString, ToCStr};
use libc;
use ptr;
use result::*;
use string::String;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
filename.with_c_str(|raw_name| {
dlopen(raw_name, Lazy as libc::c_int) as *mut u8
})
}
pub unsafe fn open_internal() -> *mut u8 {
dlopen(ptr::null(), Lazy as libc::c_int) as *mut u8
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence
let _guard = LOCK.lock();
let _old_error = dlerror();
let result = f();
let last_error = dlerror() as *const _;
let ret = if ptr::null() == last_error {
Ok(result)
} else {
Err(String::from_str(CString::new(last_error, false).as_str()
.unwrap()))
};
ret
}
}
pub unsafe fn symbol(handle: *mut u8,
symbol: *const libc::c_char) -> *mut u8 {
dlsym(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
dlclose(handle as *mut libc::c_void); ()
}
pub enum Rtld {
Lazy = 1,
Now = 2,
Global = 256,
Local = 0,
}
#[link_name = "dl"]
extern {
fn dlopen(filename: *const libc::c_char,
flag: libc::c_int) -> *mut libc::c_void;
fn dlerror() -> *mut libc::c_char;
fn dlsym(handle: *mut libc::c_void,
symbol: *const libc::c_char) -> *mut libc::c_void;
fn dlclose(handle: *mut libc::c_void) -> libc::c_int;
}
}
#[cfg(target_os = "windows")]
pub mod dl {
use c_str::ToCStr;
use iter::Iterator;
use libc;
use os;
use ptr;
use result::{Ok, Err, Result};
use slice::SlicePrelude;
use str::StrPrelude;
use str;
use string::String;
use vec::Vec;
pub unsafe fn open_external<T: ToCStr>(filename: T) -> *mut u8 {
// Windows expects Unicode data
let filename_cstr = filename.to_c_str();
let filename_str = str::from_utf8(filename_cstr.as_bytes_no_nul()).unwrap();
let mut filename_str: Vec<u16> = filename_str.utf16_units().collect();
filename_str.push(0);
LoadLibraryW(filename_str.as_ptr() as *const libc::c_void) as *mut u8
}
pub unsafe fn open_internal() -> *mut u8 {
let mut handle = ptr::null_mut();
GetModuleHandleExW(0 as libc::DWORD, ptr::null(), &mut handle);
handle as *mut u8
}
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, String> {
unsafe {
SetLastError(0);
let result = f();
let error = os::errno();
if 0 == error {
Ok(result)
} else {
Err(format!("Error code {}", error))
}
}
}
pub unsafe fn symbol(handle: *mut u8, symbol: *const libc::c_char) -> *mut u8 {
GetProcAddress(handle as *mut libc::c_void, symbol) as *mut u8
}
pub unsafe fn close(handle: *mut u8) {
FreeLibrary(handle as *mut libc::c_void); ()
}
#[allow(non_snake_case)]
extern "system" {
fn SetLastError(error: libc::size_t);
fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void;
fn GetModuleHandleExW(dwFlags: libc::DWORD, name: *const u16,
handle: *mut *mut libc::c_void)
-> *mut libc::c_void;
fn GetProcAddress(handle: *mut libc::c_void,
name: *const libc::c_char) -> *mut libc::c_void;
fn FreeLibrary(handle: *mut libc::c_void);
}
}
|
create_path
|
identifier_name
|
debug.rs
|
use core::slice;
use drivers::io::{Io, Pio};
use super::Result;
pub fn do_sys_debug(ptr: *const u8, len: usize) -> Result<usize> {
let bytes = unsafe { slice::from_raw_parts(ptr, len) };
if unsafe { ::ENV_PTR.is_some() }
|
else {
let serial_status = Pio::<u8>::new(0x3F8 + 5);
let mut serial_data = Pio::<u8>::new(0x3F8);
for byte in bytes.iter() {
while!serial_status.readf(0x20) {}
serial_data.write(*byte);
if *byte == 8 {
while!serial_status.readf(0x20) {}
serial_data.write(0x20);
while!serial_status.readf(0x20) {}
serial_data.write(8);
}
}
}
Ok(len)
}
|
{
::env().console.lock().write(bytes);
}
|
conditional_block
|
debug.rs
|
use core::slice;
use drivers::io::{Io, Pio};
use super::Result;
pub fn do_sys_debug(ptr: *const u8, len: usize) -> Result<usize> {
let bytes = unsafe { slice::from_raw_parts(ptr, len) };
if unsafe { ::ENV_PTR.is_some() } {
::env().console.lock().write(bytes);
} else {
let serial_status = Pio::<u8>::new(0x3F8 + 5);
let mut serial_data = Pio::<u8>::new(0x3F8);
|
if *byte == 8 {
while!serial_status.readf(0x20) {}
serial_data.write(0x20);
while!serial_status.readf(0x20) {}
serial_data.write(8);
}
}
}
Ok(len)
}
|
for byte in bytes.iter() {
while !serial_status.readf(0x20) {}
serial_data.write(*byte);
|
random_line_split
|
debug.rs
|
use core::slice;
use drivers::io::{Io, Pio};
use super::Result;
pub fn do_sys_debug(ptr: *const u8, len: usize) -> Result<usize>
|
}
}
Ok(len)
}
|
{
let bytes = unsafe { slice::from_raw_parts(ptr, len) };
if unsafe { ::ENV_PTR.is_some() } {
::env().console.lock().write(bytes);
} else {
let serial_status = Pio::<u8>::new(0x3F8 + 5);
let mut serial_data = Pio::<u8>::new(0x3F8);
for byte in bytes.iter() {
while !serial_status.readf(0x20) {}
serial_data.write(*byte);
if *byte == 8 {
while !serial_status.readf(0x20) {}
serial_data.write(0x20);
while !serial_status.readf(0x20) {}
serial_data.write(8);
}
|
identifier_body
|
debug.rs
|
use core::slice;
use drivers::io::{Io, Pio};
use super::Result;
pub fn
|
(ptr: *const u8, len: usize) -> Result<usize> {
let bytes = unsafe { slice::from_raw_parts(ptr, len) };
if unsafe { ::ENV_PTR.is_some() } {
::env().console.lock().write(bytes);
} else {
let serial_status = Pio::<u8>::new(0x3F8 + 5);
let mut serial_data = Pio::<u8>::new(0x3F8);
for byte in bytes.iter() {
while!serial_status.readf(0x20) {}
serial_data.write(*byte);
if *byte == 8 {
while!serial_status.readf(0x20) {}
serial_data.write(0x20);
while!serial_status.readf(0x20) {}
serial_data.write(8);
}
}
}
Ok(len)
}
|
do_sys_debug
|
identifier_name
|
lpurge.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{agent_error::ImlAgentError, env};
use futures::future::TryFutureExt;
use std::{collections::HashMap, path::PathBuf};
use tokio::fs;
#[derive(serde::Deserialize, structopt::StructOpt, Debug)]
pub struct Config {
#[structopt(long)]
/// Filesystem Name
fs: String,
#[structopt(long)]
/// Ost Index
ost: u32,
#[structopt(long)]
/// OST pool name
pool: String,
#[structopt(long)]
freelo: u8,
#[structopt(long)]
freehi: u8,
#[structopt(long)]
mailbox: String,
}
impl Config {
fn generate_unit(&self, socket: String) -> String {
format!(
"\
device={fs}-OST{ost:04x}\n\
dryrun=true\n\
freehi={freehi}\n\
freelo={freelo}\n\
listen_socket={socket}\n\
max_jobs=0\n\
pool={fs}.{pool}\n\
",
freehi = self.freehi,
freelo = self.freelo,
socket = socket,
ost = self.ost,
fs = self.fs,
pool = self.pool,
)
}
}
fn expand_path_fmt(path_fmt: &str, c: &Config) -> strfmt::Result<String> {
let mut vars = HashMap::new();
let ost = format!("OST{:04x}", c.ost);
let freehi = c.freehi.to_string();
let freelo = c.freelo.to_string();
vars.insert("fs".to_string(), &c.fs);
vars.insert("ost".to_string(), &ost);
vars.insert("pool".to_string(), &c.pool);
vars.insert("freehi".to_string(), &freehi);
vars.insert("freelo".to_string(), &freelo);
vars.insert("mailbox".to_string(), &c.mailbox);
strfmt::strfmt(&path_fmt, &vars)
}
pub async fn create_lpurge_conf(c: Config) -> Result<(), ImlAgentError> {
let path_fmt = env::get_var("LPURGE_CONF_PATH");
let socket = env::mailbox_sock(&c.mailbox);
let file = conf_name(&c, &path_fmt).await?;
write(file, &c, socket).err_into().await
}
async fn conf_name(c: &Config, path_fmt: &str) -> Result<PathBuf, ImlAgentError> {
let path = PathBuf::from(expand_path_fmt(path_fmt, c)?);
Ok(path)
}
async fn write(file: PathBuf, c: &Config, socket: String) -> std::io::Result<()> {
if let Some(parent) = file.parent() {
fs::create_dir_all(&parent).await?;
}
let cnt = c.generate_unit(socket);
fs::write(file, cnt.as_bytes()).await
}
#[cfg(test)]
mod lpurge_conf_tests {
use super::*;
use insta::assert_display_snapshot;
use std::env;
use tempfile::tempdir;
#[tokio::test]
async fn works() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let dir = tempdir().expect("could not create temp file");
let file = dir.path().join("config");
let file2 = file.clone();
write(file, &cfg, "foobar".to_string())
.await
.expect("could not write");
let cnt = String::from_utf8(std::fs::read(&file2).expect("could not read file")).unwrap();
assert_display_snapshot!(cnt);
}
#[tokio::test]
async fn config_name()
|
}
|
{
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let file = conf_name(&cfg, "/etc/lpurge/{fs}/{ost}-{pool}.conf")
.await
.expect("name could not be created");
assert_eq!(
file,
PathBuf::from("/etc/lpurge/lima/OST0010-santiago.conf")
);
}
|
identifier_body
|
lpurge.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{agent_error::ImlAgentError, env};
use futures::future::TryFutureExt;
use std::{collections::HashMap, path::PathBuf};
use tokio::fs;
#[derive(serde::Deserialize, structopt::StructOpt, Debug)]
pub struct Config {
#[structopt(long)]
/// Filesystem Name
fs: String,
#[structopt(long)]
/// Ost Index
ost: u32,
#[structopt(long)]
/// OST pool name
pool: String,
#[structopt(long)]
freelo: u8,
#[structopt(long)]
freehi: u8,
#[structopt(long)]
mailbox: String,
}
impl Config {
fn generate_unit(&self, socket: String) -> String {
format!(
"\
device={fs}-OST{ost:04x}\n\
dryrun=true\n\
freehi={freehi}\n\
freelo={freelo}\n\
listen_socket={socket}\n\
max_jobs=0\n\
pool={fs}.{pool}\n\
",
freehi = self.freehi,
freelo = self.freelo,
socket = socket,
ost = self.ost,
fs = self.fs,
pool = self.pool,
)
}
}
fn expand_path_fmt(path_fmt: &str, c: &Config) -> strfmt::Result<String> {
let mut vars = HashMap::new();
let ost = format!("OST{:04x}", c.ost);
let freehi = c.freehi.to_string();
let freelo = c.freelo.to_string();
vars.insert("fs".to_string(), &c.fs);
vars.insert("ost".to_string(), &ost);
vars.insert("pool".to_string(), &c.pool);
vars.insert("freehi".to_string(), &freehi);
vars.insert("freelo".to_string(), &freelo);
vars.insert("mailbox".to_string(), &c.mailbox);
strfmt::strfmt(&path_fmt, &vars)
}
pub async fn create_lpurge_conf(c: Config) -> Result<(), ImlAgentError> {
let path_fmt = env::get_var("LPURGE_CONF_PATH");
let socket = env::mailbox_sock(&c.mailbox);
let file = conf_name(&c, &path_fmt).await?;
write(file, &c, socket).err_into().await
}
async fn conf_name(c: &Config, path_fmt: &str) -> Result<PathBuf, ImlAgentError> {
let path = PathBuf::from(expand_path_fmt(path_fmt, c)?);
Ok(path)
}
async fn write(file: PathBuf, c: &Config, socket: String) -> std::io::Result<()> {
if let Some(parent) = file.parent() {
fs::create_dir_all(&parent).await?;
}
let cnt = c.generate_unit(socket);
fs::write(file, cnt.as_bytes()).await
}
#[cfg(test)]
mod lpurge_conf_tests {
use super::*;
use insta::assert_display_snapshot;
use std::env;
use tempfile::tempdir;
#[tokio::test]
async fn works() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let dir = tempdir().expect("could not create temp file");
let file = dir.path().join("config");
let file2 = file.clone();
write(file, &cfg, "foobar".to_string())
.await
.expect("could not write");
let cnt = String::from_utf8(std::fs::read(&file2).expect("could not read file")).unwrap();
assert_display_snapshot!(cnt);
}
#[tokio::test]
async fn
|
() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let file = conf_name(&cfg, "/etc/lpurge/{fs}/{ost}-{pool}.conf")
.await
.expect("name could not be created");
assert_eq!(
file,
PathBuf::from("/etc/lpurge/lima/OST0010-santiago.conf")
);
}
}
|
config_name
|
identifier_name
|
lpurge.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{agent_error::ImlAgentError, env};
use futures::future::TryFutureExt;
use std::{collections::HashMap, path::PathBuf};
use tokio::fs;
#[derive(serde::Deserialize, structopt::StructOpt, Debug)]
pub struct Config {
#[structopt(long)]
/// Filesystem Name
fs: String,
#[structopt(long)]
/// Ost Index
ost: u32,
#[structopt(long)]
/// OST pool name
pool: String,
#[structopt(long)]
freelo: u8,
#[structopt(long)]
freehi: u8,
#[structopt(long)]
mailbox: String,
}
impl Config {
fn generate_unit(&self, socket: String) -> String {
format!(
"\
device={fs}-OST{ost:04x}\n\
dryrun=true\n\
freehi={freehi}\n\
freelo={freelo}\n\
listen_socket={socket}\n\
max_jobs=0\n\
pool={fs}.{pool}\n\
",
freehi = self.freehi,
freelo = self.freelo,
socket = socket,
ost = self.ost,
fs = self.fs,
pool = self.pool,
)
}
}
fn expand_path_fmt(path_fmt: &str, c: &Config) -> strfmt::Result<String> {
let mut vars = HashMap::new();
let ost = format!("OST{:04x}", c.ost);
let freehi = c.freehi.to_string();
let freelo = c.freelo.to_string();
vars.insert("fs".to_string(), &c.fs);
vars.insert("ost".to_string(), &ost);
vars.insert("pool".to_string(), &c.pool);
vars.insert("freehi".to_string(), &freehi);
vars.insert("freelo".to_string(), &freelo);
vars.insert("mailbox".to_string(), &c.mailbox);
strfmt::strfmt(&path_fmt, &vars)
}
pub async fn create_lpurge_conf(c: Config) -> Result<(), ImlAgentError> {
let path_fmt = env::get_var("LPURGE_CONF_PATH");
let socket = env::mailbox_sock(&c.mailbox);
let file = conf_name(&c, &path_fmt).await?;
write(file, &c, socket).err_into().await
}
async fn conf_name(c: &Config, path_fmt: &str) -> Result<PathBuf, ImlAgentError> {
let path = PathBuf::from(expand_path_fmt(path_fmt, c)?);
Ok(path)
}
async fn write(file: PathBuf, c: &Config, socket: String) -> std::io::Result<()> {
if let Some(parent) = file.parent()
|
let cnt = c.generate_unit(socket);
fs::write(file, cnt.as_bytes()).await
}
#[cfg(test)]
mod lpurge_conf_tests {
use super::*;
use insta::assert_display_snapshot;
use std::env;
use tempfile::tempdir;
#[tokio::test]
async fn works() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let dir = tempdir().expect("could not create temp file");
let file = dir.path().join("config");
let file2 = file.clone();
write(file, &cfg, "foobar".to_string())
.await
.expect("could not write");
let cnt = String::from_utf8(std::fs::read(&file2).expect("could not read file")).unwrap();
assert_display_snapshot!(cnt);
}
#[tokio::test]
async fn config_name() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let file = conf_name(&cfg, "/etc/lpurge/{fs}/{ost}-{pool}.conf")
.await
.expect("name could not be created");
assert_eq!(
file,
PathBuf::from("/etc/lpurge/lima/OST0010-santiago.conf")
);
}
}
|
{
fs::create_dir_all(&parent).await?;
}
|
conditional_block
|
lpurge.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{agent_error::ImlAgentError, env};
use futures::future::TryFutureExt;
use std::{collections::HashMap, path::PathBuf};
use tokio::fs;
#[derive(serde::Deserialize, structopt::StructOpt, Debug)]
pub struct Config {
#[structopt(long)]
/// Filesystem Name
fs: String,
#[structopt(long)]
/// Ost Index
ost: u32,
#[structopt(long)]
/// OST pool name
pool: String,
#[structopt(long)]
freelo: u8,
#[structopt(long)]
freehi: u8,
#[structopt(long)]
mailbox: String,
}
impl Config {
fn generate_unit(&self, socket: String) -> String {
format!(
"\
device={fs}-OST{ost:04x}\n\
dryrun=true\n\
freehi={freehi}\n\
freelo={freelo}\n\
listen_socket={socket}\n\
max_jobs=0\n\
pool={fs}.{pool}\n\
",
freehi = self.freehi,
freelo = self.freelo,
socket = socket,
ost = self.ost,
fs = self.fs,
pool = self.pool,
)
}
}
/// Expand `path_fmt`, a `strfmt`-style template such as
/// `/etc/lpurge/{fs}/{ost}-{pool}.conf`, with values taken from `c`.
///
/// Available keys: `fs`, `ost` (rendered in hex as `OSTxxxx`), `pool`,
/// `freehi`, `freelo`, and `mailbox`.  Returns an error if the template
/// is malformed or references an unknown key.
fn expand_path_fmt(path_fmt: &str, c: &Config) -> strfmt::Result<String> {
    let mut vars = HashMap::new();
    // Pre-render non-string fields so the map can hold `&String` values.
    let ost = format!("OST{:04x}", c.ost);
    let freehi = c.freehi.to_string();
    let freelo = c.freelo.to_string();
    vars.insert("fs".to_string(), &c.fs);
    vars.insert("ost".to_string(), &ost);
    vars.insert("pool".to_string(), &c.pool);
    vars.insert("freehi".to_string(), &freehi);
    vars.insert("freelo".to_string(), &freelo);
    vars.insert("mailbox".to_string(), &c.mailbox);
    // `path_fmt` is already a `&str`; the previous `&path_fmt` was a
    // needless extra borrow.
    strfmt::strfmt(path_fmt, &vars)
}
/// Create an lpurge configuration file for `c`.
///
/// The destination path comes from the `LPURGE_CONF_PATH` template and
/// the listen socket is derived from the configured mailbox name.
pub async fn create_lpurge_conf(c: Config) -> Result<(), ImlAgentError> {
    let path_fmt = env::get_var("LPURGE_CONF_PATH");
    let socket = env::mailbox_sock(&c.mailbox);
    let file = conf_name(&c, &path_fmt).await?;
    // `?` converts the `io::Error` through the same `From` impl that
    // `err_into()` relied on.
    write(file, &c, socket).await?;
    Ok(())
}
/// Build the on-disk config-file path by expanding `path_fmt` with the
/// values from `c`.
async fn conf_name(c: &Config, path_fmt: &str) -> Result<PathBuf, ImlAgentError> {
    let expanded = expand_path_fmt(path_fmt, c)?;
    Ok(expanded.into())
}
/// Persist the rendered lpurge unit for `c` to `file`, creating any
/// missing parent directories first.
async fn write(file: PathBuf, c: &Config, socket: String) -> std::io::Result<()> {
    if let Some(dir) = file.parent() {
        fs::create_dir_all(dir).await?;
    }
    let contents = c.generate_unit(socket);
    fs::write(&file, contents.into_bytes()).await
}
#[cfg(test)]
mod lpurge_conf_tests {
use super::*;
use insta::assert_display_snapshot;
use std::env;
use tempfile::tempdir;
#[tokio::test]
async fn works() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let dir = tempdir().expect("could not create temp file");
let file = dir.path().join("config");
let file2 = file.clone();
write(file, &cfg, "foobar".to_string())
.await
.expect("could not write");
let cnt = String::from_utf8(std::fs::read(&file2).expect("could not read file")).unwrap();
assert_display_snapshot!(cnt);
}
#[tokio::test]
async fn config_name() {
let cfg = Config {
fs: "lima".to_string(),
pool: "santiago".to_string(),
ost: 16,
freehi: 123,
freelo: 60,
mailbox: "foobar".to_string(),
};
let file = conf_name(&cfg, "/etc/lpurge/{fs}/{ost}-{pool}.conf")
|
.await
.expect("name could not be created");
assert_eq!(
file,
PathBuf::from("/etc/lpurge/lima/OST0010-santiago.conf")
);
}
}
|
random_line_split
|
|
check_const.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::def::*;
use middle::ty;
use middle::typeck;
use util::ppaux;
use syntax::ast::*;
use syntax::ast_util;
use syntax::visit::Visitor;
use syntax::visit;
/// AST visitor that validates constant expressions (`static`/`const`
/// initializers and enum discriminants) against the subset of the
/// language allowed in constants.
struct CheckCrateVisitor<'a, 'tcx: 'a> {
    tcx: &'a ty::ctxt<'tcx>,
    // True while the expression currently being visited must be constant.
    in_const: bool
}
impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
    // Run `f` with the `in_const` flag set to the given value, restoring
    // the previous value afterwards (save/restore, so contexts nest).
    fn with_const(&mut self, in_const: bool, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
        let was_const = self.in_const;
        self.in_const = in_const;
        f(self);
        self.in_const = was_const;
    }
    // Run `f` in constant context (`in_const = true`).
    fn inside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
        self.with_const(true, f);
    }
    // Run `f` in ordinary, non-constant context (`in_const = false`).
    fn outside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
        self.with_const(false, f);
    }
}
impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
    fn visit_item(&mut self, i: &Item) {
        check_item(self, i);
    }
    fn visit_pat(&mut self, p: &Pat) {
        check_pat(self, p);
    }
    fn visit_expr(&mut self, ex: &Expr) {
        // Only descend into children when check_expr reports the subtree
        // may be traversed further.
        if check_expr(self, ex) {
            visit::walk_expr(self, ex);
        }
    }
}
/// Walk the entire crate checking constant expressions; aborts the
/// compilation session if any errors were reported.
pub fn check_crate(tcx: &ty::ctxt) {
    visit::walk_crate(&mut CheckCrateVisitor { tcx: tcx, in_const: false },
                      tcx.map.krate());
    tcx.sess.abort_if_errors();
}
/// Check one item: static/const initializers and explicit enum
/// discriminant expressions are visited in constant context; everything
/// else is walked in ordinary context.
fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
    match it.node {
        ItemStatic(_, _, ref ex) |
        ItemConst(_, ref ex) => {
            v.inside_const(|v| v.visit_expr(&**ex));
        }
        ItemEnum(ref enum_definition, _) => {
            // Explicit discriminant values must themselves be constant.
            for var in (*enum_definition).variants.iter() {
                for ex in var.node.disr_expr.iter() {
                    v.inside_const(|v| v.visit_expr(&**ex));
                }
            }
        }
        _ => v.outside_const(|v| visit::walk_item(v, it))
    }
}
fn check_pat(v: &mut CheckCrateVisitor, p: &Pat) {
fn is_str(e: &Expr) -> bool {
match e.node {
ExprBox(_, ref expr) => {
|
match expr.node {
ExprLit(ref lit) => ast_util::lit_is_str(&**lit),
_ => false,
}
}
_ => false,
}
}
match p.node {
// Let through plain ~-string literals here
PatLit(ref a) => if!is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); },
PatRange(ref a, ref b) => {
if!is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); }
if!is_str(&**b) { v.inside_const(|v| v.visit_expr(&**b)); }
}
_ => v.outside_const(|v| visit::walk_pat(v, p))
}
}
/// Check a single expression appearing in constant context.
///
/// Emits an error for constructs not allowed in constants and returns
/// `true` if the visitor should recurse into the expression's children,
/// `false` to stop traversal of this subtree.
fn check_expr(v: &mut CheckCrateVisitor, e: &Expr) -> bool {
    // Outside of constant context everything is allowed; keep walking.
    if !v.in_const { return true }
    match e.node {
        ExprUnary(UnDeref, _) => {}
        ExprUnary(UnUniq, _) => {
            span_err!(v.tcx.sess, e.span, E0010,
                      "cannot do allocations in constant expressions");
            return false;
        }
        ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {}
        ExprBinary(..) | ExprUnary(..) => {
            // An operator that resolves through the method map is a
            // user-defined overload, which cannot run at compile time.
            let method_call = typeck::MethodCall::expr(e.id);
            if v.tcx.method_map.borrow().contains_key(&method_call) {
                span_err!(v.tcx.sess, e.span, E0011,
                          "user-defined operators are not allowed in constant \
                           expressions");
            }
        }
        ExprLit(_) => (),
        ExprCast(_, _) => {
            // Only casts to numeric or unsafe-pointer types are allowed.
            let ety = ty::expr_ty(v.tcx, e);
            if !ty::type_is_numeric(ety) && !ty::type_is_unsafe_ptr(ety) {
                span_err!(v.tcx.sess, e.span, E0012,
                          "can not cast to `{}` in a constant expression",
                          ppaux::ty_to_string(v.tcx, ety));
            }
        }
        ExprPath(ref pth) => {
            // NB: In the future you might wish to relax this slightly
            // to handle on-demand instantiation of functions via
            // foo::<bar> in a const. Currently that is only done on
            // a path in trans::callee that only works in block contexts.
            if !pth.segments.iter().all(|segment| segment.types.is_empty()) {
                span_err!(v.tcx.sess, e.span, E0013,
                          "paths in constants may only refer to items without \
                           type parameters");
            }
            // Paths may only resolve to statics, consts, functions, or
            // struct/variant constructors.
            match v.tcx.def_map.borrow().find(&e.id) {
                Some(&DefStatic(..)) |
                Some(&DefConst(..)) |
                Some(&DefFn(..)) |
                Some(&DefVariant(_, _, _)) |
                Some(&DefStruct(_)) => { }
                Some(&def) => {
                    debug!("(checking const) found bad def: {}", def);
                    span_err!(v.tcx.sess, e.span, E0014,
                              "paths in constants may only refer to constants \
                               or functions");
                }
                None => {
                    v.tcx.sess.span_bug(e.span, "unbound path in const?!");
                }
            }
        }
        ExprCall(ref callee, _) => {
            // Only struct and enum-variant constructors may be "called".
            match v.tcx.def_map.borrow().find(&callee.id) {
                Some(&DefStruct(..)) |
                Some(&DefVariant(..)) => {} // OK.
                _ => {
                    span_err!(v.tcx.sess, e.span, E0015,
                              "function calls in constants are limited to \
                               struct and enum constructors");
                }
            }
        }
        ExprBlock(ref block) => {
            // Check all statements in the block
            for stmt in block.stmts.iter() {
                let block_span_err = |span|
                    span_err!(v.tcx.sess, span, E0016,
                              "blocks in constants are limited to items and \
                               tail expressions");
                match stmt.node {
                    StmtDecl(ref span, _) => {
                        match span.node {
                            DeclLocal(_) => block_span_err(span.span),
                            // Item statements are allowed
                            DeclItem(_) => {}
                        }
                    }
                    StmtExpr(ref expr, _) => block_span_err(expr.span),
                    StmtSemi(ref semi, _) => block_span_err(semi.span),
                    StmtMac(..) => {
                        v.tcx.sess.span_bug(e.span, "unexpanded statement \
                                                     macro in const?!")
                    }
                }
            }
            // The tail expression, if present, is const-checked as well.
            match block.expr {
                Some(ref expr) => { check_expr(v, &**expr); }
                None => {}
            }
        }
        ExprVec(_) |
        ExprAddrOf(MutImmutable, _) |
        ExprParen(..) |
        ExprField(..) |
        ExprTupField(..) |
        ExprIndex(..) |
        ExprTup(..) |
        ExprRepeat(..) |
        ExprStruct(..) => {}
        ExprAddrOf(_, ref inner) => {
            match inner.node {
                // Mutable slices are allowed.
                ExprVec(_) => {}
                _ => span_err!(v.tcx.sess, e.span, E0017,
                               "references in constants may only refer \
                                to immutable values")
            }
        }
        _ => {
            span_err!(v.tcx.sess, e.span, E0019,
                      "constant contains unimplemented expression type");
            return false;
        }
    }
    true
}
|
random_line_split
|
|
check_const.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::def::*;
use middle::ty;
use middle::typeck;
use util::ppaux;
use syntax::ast::*;
use syntax::ast_util;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
in_const: bool
}
impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
fn with_const(&mut self, in_const: bool, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
let was_const = self.in_const;
self.in_const = in_const;
f(self);
self.in_const = was_const;
}
fn inside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
self.with_const(true, f);
}
fn outside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
self.with_const(false, f);
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &Item) {
check_item(self, i);
}
fn
|
(&mut self, p: &Pat) {
check_pat(self, p);
}
fn visit_expr(&mut self, ex: &Expr) {
if check_expr(self, ex) {
visit::walk_expr(self, ex);
}
}
}
pub fn check_crate(tcx: &ty::ctxt) {
visit::walk_crate(&mut CheckCrateVisitor { tcx: tcx, in_const: false },
tcx.map.krate());
tcx.sess.abort_if_errors();
}
fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
match it.node {
ItemStatic(_, _, ref ex) |
ItemConst(_, ref ex) => {
v.inside_const(|v| v.visit_expr(&**ex));
}
ItemEnum(ref enum_definition, _) => {
for var in (*enum_definition).variants.iter() {
for ex in var.node.disr_expr.iter() {
v.inside_const(|v| v.visit_expr(&**ex));
}
}
}
_ => v.outside_const(|v| visit::walk_item(v, it))
}
}
/// Check a pattern appearing in an expression tree.
///
/// Literal and range patterns are const-checked (boxed string literals
/// are let through); all other patterns are walked in ordinary context.
fn check_pat(v: &mut CheckCrateVisitor, p: &Pat) {
    // True for a boxed string-literal expression.
    fn is_str(e: &Expr) -> bool {
        match e.node {
            ExprBox(_, ref expr) => {
                match expr.node {
                    ExprLit(ref lit) => ast_util::lit_is_str(&**lit),
                    _ => false,
                }
            }
            _ => false,
        }
    }
    match p.node {
        // Let through plain ~-string literals here
        PatLit(ref a) => if !is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); },
        PatRange(ref a, ref b) => {
            if !is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); }
            if !is_str(&**b) { v.inside_const(|v| v.visit_expr(&**b)); }
        }
        _ => v.outside_const(|v| visit::walk_pat(v, p))
    }
}
fn check_expr(v: &mut CheckCrateVisitor, e: &Expr) -> bool {
if!v.in_const { return true }
match e.node {
ExprUnary(UnDeref, _) => {}
ExprUnary(UnUniq, _) => {
span_err!(v.tcx.sess, e.span, E0010,
"cannot do allocations in constant expressions");
return false;
}
ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {}
ExprBinary(..) | ExprUnary(..) => {
let method_call = typeck::MethodCall::expr(e.id);
if v.tcx.method_map.borrow().contains_key(&method_call) {
span_err!(v.tcx.sess, e.span, E0011,
"user-defined operators are not allowed in constant \
expressions");
}
}
ExprLit(_) => (),
ExprCast(_, _) => {
let ety = ty::expr_ty(v.tcx, e);
if!ty::type_is_numeric(ety) &&!ty::type_is_unsafe_ptr(ety) {
span_err!(v.tcx.sess, e.span, E0012,
"can not cast to `{}` in a constant expression",
ppaux::ty_to_string(v.tcx, ety));
}
}
ExprPath(ref pth) => {
// NB: In the future you might wish to relax this slightly
// to handle on-demand instantiation of functions via
// foo::<bar> in a const. Currently that is only done on
// a path in trans::callee that only works in block contexts.
if!pth.segments.iter().all(|segment| segment.types.is_empty()) {
span_err!(v.tcx.sess, e.span, E0013,
"paths in constants may only refer to items without \
type parameters");
}
match v.tcx.def_map.borrow().find(&e.id) {
Some(&DefStatic(..)) |
Some(&DefConst(..)) |
Some(&DefFn(..)) |
Some(&DefVariant(_, _, _)) |
Some(&DefStruct(_)) => { }
Some(&def) => {
debug!("(checking const) found bad def: {}", def);
span_err!(v.tcx.sess, e.span, E0014,
"paths in constants may only refer to constants \
or functions");
}
None => {
v.tcx.sess.span_bug(e.span, "unbound path in const?!");
}
}
}
ExprCall(ref callee, _) => {
match v.tcx.def_map.borrow().find(&callee.id) {
Some(&DefStruct(..)) |
Some(&DefVariant(..)) => {} // OK.
_ => {
span_err!(v.tcx.sess, e.span, E0015,
"function calls in constants are limited to \
struct and enum constructors");
}
}
}
ExprBlock(ref block) => {
// Check all statements in the block
for stmt in block.stmts.iter() {
let block_span_err = |span|
span_err!(v.tcx.sess, span, E0016,
"blocks in constants are limited to items and \
tail expressions");
match stmt.node {
StmtDecl(ref span, _) => {
match span.node {
DeclLocal(_) => block_span_err(span.span),
// Item statements are allowed
DeclItem(_) => {}
}
}
StmtExpr(ref expr, _) => block_span_err(expr.span),
StmtSemi(ref semi, _) => block_span_err(semi.span),
StmtMac(..) => {
v.tcx.sess.span_bug(e.span, "unexpanded statement \
macro in const?!")
}
}
}
match block.expr {
Some(ref expr) => { check_expr(v, &**expr); }
None => {}
}
}
ExprVec(_) |
ExprAddrOf(MutImmutable, _) |
ExprParen(..) |
ExprField(..) |
ExprTupField(..) |
ExprIndex(..) |
ExprTup(..) |
ExprRepeat(..) |
ExprStruct(..) => {}
ExprAddrOf(_, ref inner) => {
match inner.node {
// Mutable slices are allowed.
ExprVec(_) => {}
_ => span_err!(v.tcx.sess, e.span, E0017,
"references in constants may only refer \
to immutable values")
}
}
_ => {
span_err!(v.tcx.sess, e.span, E0019,
"constant contains unimplemented expression type");
return false;
}
}
true
}
|
visit_pat
|
identifier_name
|
check_const.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::def::*;
use middle::ty;
use middle::typeck;
use util::ppaux;
use syntax::ast::*;
use syntax::ast_util;
use syntax::visit::Visitor;
use syntax::visit;
struct CheckCrateVisitor<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
in_const: bool
}
impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> {
fn with_const(&mut self, in_const: bool, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
let was_const = self.in_const;
self.in_const = in_const;
f(self);
self.in_const = was_const;
}
fn inside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
self.with_const(true, f);
}
fn outside_const(&mut self, f: |&mut CheckCrateVisitor<'a, 'tcx>|) {
self.with_const(false, f);
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &Item) {
check_item(self, i);
}
fn visit_pat(&mut self, p: &Pat) {
check_pat(self, p);
}
fn visit_expr(&mut self, ex: &Expr) {
if check_expr(self, ex) {
visit::walk_expr(self, ex);
}
}
}
pub fn check_crate(tcx: &ty::ctxt) {
visit::walk_crate(&mut CheckCrateVisitor { tcx: tcx, in_const: false },
tcx.map.krate());
tcx.sess.abort_if_errors();
}
fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
match it.node {
ItemStatic(_, _, ref ex) |
ItemConst(_, ref ex) => {
v.inside_const(|v| v.visit_expr(&**ex));
}
ItemEnum(ref enum_definition, _) => {
for var in (*enum_definition).variants.iter() {
for ex in var.node.disr_expr.iter() {
v.inside_const(|v| v.visit_expr(&**ex));
}
}
}
_ => v.outside_const(|v| visit::walk_item(v, it))
}
}
fn check_pat(v: &mut CheckCrateVisitor, p: &Pat) {
fn is_str(e: &Expr) -> bool {
match e.node {
ExprBox(_, ref expr) => {
match expr.node {
ExprLit(ref lit) => ast_util::lit_is_str(&**lit),
_ => false,
}
}
_ => false,
}
}
match p.node {
// Let through plain ~-string literals here
PatLit(ref a) => if!is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); },
PatRange(ref a, ref b) => {
if!is_str(&**a) { v.inside_const(|v| v.visit_expr(&**a)); }
if!is_str(&**b) { v.inside_const(|v| v.visit_expr(&**b)); }
}
_ => v.outside_const(|v| visit::walk_pat(v, p))
}
}
fn check_expr(v: &mut CheckCrateVisitor, e: &Expr) -> bool {
if!v.in_const { return true }
match e.node {
ExprUnary(UnDeref, _) => {}
ExprUnary(UnUniq, _) => {
span_err!(v.tcx.sess, e.span, E0010,
"cannot do allocations in constant expressions");
return false;
}
ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {}
ExprBinary(..) | ExprUnary(..) => {
let method_call = typeck::MethodCall::expr(e.id);
if v.tcx.method_map.borrow().contains_key(&method_call) {
span_err!(v.tcx.sess, e.span, E0011,
"user-defined operators are not allowed in constant \
expressions");
}
}
ExprLit(_) => (),
ExprCast(_, _) =>
|
ExprPath(ref pth) => {
// NB: In the future you might wish to relax this slightly
// to handle on-demand instantiation of functions via
// foo::<bar> in a const. Currently that is only done on
// a path in trans::callee that only works in block contexts.
if!pth.segments.iter().all(|segment| segment.types.is_empty()) {
span_err!(v.tcx.sess, e.span, E0013,
"paths in constants may only refer to items without \
type parameters");
}
match v.tcx.def_map.borrow().find(&e.id) {
Some(&DefStatic(..)) |
Some(&DefConst(..)) |
Some(&DefFn(..)) |
Some(&DefVariant(_, _, _)) |
Some(&DefStruct(_)) => { }
Some(&def) => {
debug!("(checking const) found bad def: {}", def);
span_err!(v.tcx.sess, e.span, E0014,
"paths in constants may only refer to constants \
or functions");
}
None => {
v.tcx.sess.span_bug(e.span, "unbound path in const?!");
}
}
}
ExprCall(ref callee, _) => {
match v.tcx.def_map.borrow().find(&callee.id) {
Some(&DefStruct(..)) |
Some(&DefVariant(..)) => {} // OK.
_ => {
span_err!(v.tcx.sess, e.span, E0015,
"function calls in constants are limited to \
struct and enum constructors");
}
}
}
ExprBlock(ref block) => {
// Check all statements in the block
for stmt in block.stmts.iter() {
let block_span_err = |span|
span_err!(v.tcx.sess, span, E0016,
"blocks in constants are limited to items and \
tail expressions");
match stmt.node {
StmtDecl(ref span, _) => {
match span.node {
DeclLocal(_) => block_span_err(span.span),
// Item statements are allowed
DeclItem(_) => {}
}
}
StmtExpr(ref expr, _) => block_span_err(expr.span),
StmtSemi(ref semi, _) => block_span_err(semi.span),
StmtMac(..) => {
v.tcx.sess.span_bug(e.span, "unexpanded statement \
macro in const?!")
}
}
}
match block.expr {
Some(ref expr) => { check_expr(v, &**expr); }
None => {}
}
}
ExprVec(_) |
ExprAddrOf(MutImmutable, _) |
ExprParen(..) |
ExprField(..) |
ExprTupField(..) |
ExprIndex(..) |
ExprTup(..) |
ExprRepeat(..) |
ExprStruct(..) => {}
ExprAddrOf(_, ref inner) => {
match inner.node {
// Mutable slices are allowed.
ExprVec(_) => {}
_ => span_err!(v.tcx.sess, e.span, E0017,
"references in constants may only refer \
to immutable values")
}
}
_ => {
span_err!(v.tcx.sess, e.span, E0019,
"constant contains unimplemented expression type");
return false;
}
}
true
}
|
{
let ety = ty::expr_ty(v.tcx, e);
if !ty::type_is_numeric(ety) && !ty::type_is_unsafe_ptr(ety) {
span_err!(v.tcx.sess, e.span, E0012,
"can not cast to `{}` in a constant expression",
ppaux::ty_to_string(v.tcx, ety));
}
}
|
conditional_block
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
// ========================================================================= //
pub fn run_column_as_icy_em(
window: &mut Window,
save_data: &mut SaveData,
) -> Mode
|
// ========================================================================= //
|
{
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
&save_data.game_mut().column_as_icy_em,
)
};
run_puzzle(window, save_data, view)
}
|
identifier_body
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
// ========================================================================= //
pub fn
|
(
window: &mut Window,
save_data: &mut SaveData,
) -> Mode {
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
&save_data.game_mut().column_as_icy_em,
)
};
run_puzzle(window, save_data, view)
}
// ========================================================================= //
|
run_column_as_icy_em
|
identifier_name
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
|
save_data: &mut SaveData,
) -> Mode {
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
&save_data.game_mut().column_as_icy_em,
)
};
run_puzzle(window, save_data, view)
}
// ========================================================================= //
|
// ========================================================================= //
pub fn run_column_as_icy_em(
window: &mut Window,
|
random_line_split
|
slow.rs
|
//! Slow, fallback cases where we cannot unambiguously round a float.
//!
//! This occurs when we cannot determine the exact representation using
//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms,
//! and therefore must fallback to a slow, arbitrary-precision representation.
#![doc(hidden)]
use crate::bigint::{Bigint, Limb, LIMB_BITS};
use crate::extended_float::{extended_to_float, ExtendedFloat};
use crate::num::Float;
use crate::number::Number;
use crate::rounding::{round, round_down, round_nearest_tie_even};
use core::cmp;
// ALGORITHM
// ---------
/// Parse the significant digits and biased, binary exponent of a float.
///
/// This is a fallback algorithm that uses a big-integer representation
/// of the float, and therefore is considerably slower than faster
/// approximations. However, it will always determine how to round
/// the significant digits to the nearest machine float, allowing
/// use to handle near half-way cases.
///
/// Near half-way cases are halfway between two consecutive machine floats.
/// For example, the float `16777217.0` has a bitwise representation of
/// `100000000000000000000000 1`. Rounding to a single-precision float,
/// the trailing `1` is truncated. Using round-nearest, tie-even, any
/// value above `16777217.0` must be rounded up to `16777218.0`, while
/// any value before or equal to `16777217.0` must be rounded down
/// to `16777216.0`. These near-halfway conversions therefore may require
/// a large number of digits to unambiguously determine how to round.
#[inline]
pub fn slow<'a, F, Iter1, Iter2>(
    num: Number,
    fp: ExtendedFloat,
    integer: Iter1,
    fraction: Iter2,
) -> ExtendedFloat
where
    F: Float,
    Iter1: Iterator<Item = &'a u8> + Clone,
    Iter2: Iterator<Item = &'a u8> + Clone,
{
    // Precondition: the significant digits have not yet been shifted into
    // their final position (the top bit of the mantissa must be set).
    debug_assert!(fp.mant & (1 << 63) != 0);
    // The sign has already been consumed and the format validated, so we
    // start directly from the integer digits.
    let sci_exp = scientific_exponent(&num);
    // Accumulate up to MAX_DIGITS significant digits into a big integer,
    // then derive the decimal exponent relative to those digits.
    let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS);
    let exponent = sci_exp + 1 - digits as i32;
    // Dispatch on the sign of the decimal exponent:
    //  * negative — compare against the theoretical halfway value.
    //  * non-negative — scale the digits up by a power of ten.
    if exponent < 0 {
        negative_digit_comp::<F>(bigmant, fp, exponent)
    } else {
        positive_digit_comp::<F>(bigmant, exponent)
    }
}
/// Generate the significant digits with a positive exponent relative to mantissa.
pub fn
|
<F: Float>(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat {
// Simple, we just need to multiply by the power of the radix.
// Now, we can calculate the mantissa and the exponent from this.
// The binary exponent is the binary exponent for the mantissa
// shifted to the hidden bit.
bigmant.pow(10, exponent as u32).unwrap();
// Get the exact representation of the float from the big integer.
// hi64 checks **all** the remaining bits after the mantissa,
// so it will check if **any** truncated digits exist.
let (mant, is_truncated) = bigmant.hi64();
let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS;
let mut fp = ExtendedFloat {
mant,
exp,
};
// Shift the digits into position and determine if we need to round-up.
round::<F, _>(&mut fp, |f, s| {
round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| {
is_above || (is_halfway && is_truncated) || (is_odd && is_halfway)
});
});
fp
}
/// Generate the significant digits with a negative exponent relative to mantissa.
///
/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`,
/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix
/// exponent. We then calculate the theoretical representation of `b+h`, which
/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary
/// exponent. If we had infinite, efficient floating precision, this would be
/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`.
///
/// Since we cannot divide and keep precision, we must multiply the other:
/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do
/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show and example
/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove
/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if
/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise,
/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents
/// are all positive.
///
/// This allows us to compare both floats using integers efficiently
/// without any loss of precision.
#[allow(clippy::comparison_chain)]
pub fn negative_digit_comp<F: Float>(
    bigmant: Bigint,
    mut fp: ExtendedFloat,
    exponent: i32,
) -> ExtendedFloat {
    // Ensure our preconditions are valid:
    // 1. The significant digits are not shifted into place.
    debug_assert!(fp.mant & (1 << 63) != 0);
    // Get the significant digits and radix exponent for the real digits.
    let mut real_digits = bigmant;
    let real_exp = exponent;
    debug_assert!(real_exp < 0);
    // Round down our extended-precision float and calculate `b`.
    let mut b = fp;
    round::<F, _>(&mut b, round_down);
    let b = extended_to_float::<F>(b);
    // Get the significant digits and the binary exponent for `b+h`.
    let theor = bh(b);
    let mut theor_digits = Bigint::from_u64(theor.mant);
    let theor_exp = theor.exp;
    // We need to scale the real digits and `b+h` digits to be the same
    // order. We currently have `real_exp`, in `radix`, that needs to be
    // shifted to `theor_digits` (since it is negative), and `theor_exp`
    // to either `theor_digits` or `real_digits` as a power of 2 (since it
    // may be positive or negative). Try to remove as many powers of 2
    // as possible. All values are relative to `theor_digits`, that is,
    // reflect the power you need to multiply `theor_digits` by.
    //
    // Both are on opposite-sides of equation, can factor out a
    // power of two.
    //
    // Example: 10^-10, 2^-10 -> ( 0, 10, 0)
    // Example: 10^-10, 2^-15 -> (-5, 10, 0)
    // Example: 10^-10, 2^-5  -> ( 5, 10, 0)
    // Example: 10^-10, 2^5   -> (15, 10, 0)
    let binary_exp = theor_exp - real_exp;
    let halfradix_exp = -real_exp;
    // Multiply out the 5^x factor of the decimal scaling (the 2^x factor
    // is folded into `binary_exp` below).
    if halfradix_exp != 0 {
        theor_digits.pow(5, halfradix_exp as u32).unwrap();
    }
    // Apply the remaining power of two to whichever side keeps all
    // exponents positive.
    if binary_exp > 0 {
        theor_digits.pow(2, binary_exp as u32).unwrap();
    } else if binary_exp < 0 {
        real_digits.pow(2, (-binary_exp) as u32).unwrap();
    }
    // Compare our theoretical and real digits and round nearest, tie even.
    let ord = real_digits.data.cmp(&theor_digits.data);
    round::<F, _>(&mut fp, |f, s| {
        round_nearest_tie_even(f, s, |is_odd, _, _| {
            // Can ignore `is_halfway` and `is_above`, since those were
            // calculates using less significant digits.
            match ord {
                cmp::Ordering::Greater => true,
                cmp::Ordering::Less => false,
                cmp::Ordering::Equal if is_odd => true,
                cmp::Ordering::Equal => false,
            }
        });
    });
    fp
}
/// Add a digit to the temporary value.
macro_rules! add_digit {
    ($c:ident, $value:ident, $counter:ident, $count:ident) => {{
        // Fold one ASCII decimal digit into the native-limb accumulator.
        let digit = $c - b'0';
        $value *= 10 as Limb;
        $value += digit as Limb;
        // Increment our counters.
        $counter += 1;
        $count += 1;
    }};
}
/// Add a temporary value to our mantissa.
///
/// Flushes the native accumulator (`$value`, holding `$counter` decimal
/// digits) into the big-integer `$result` by scaling the result by
/// `10^$counter` and adding the accumulated value.
macro_rules! add_temporary {
    // Multiply by the small power and add the native value.
    (@mul $result:ident, $power:expr, $value:expr) => {
        $result.data.mul_small($power).unwrap();
        $result.data.add_small($value).unwrap();
    };
    // # Safety
    //
    // Safe is `counter <= step`, or smaller than the table size.
    //
    // NOTE(review): `$format` is accepted but never used in this arm;
    // presumably kept for call-site symmetry with format-aware builds —
    // confirm upstream before removing.
    ($format:ident, $result:ident, $counter:ident, $value:ident) => {
        if $counter!= 0 {
            // SAFETY: safe, since `counter <= step`, or smaller than the table size.
            let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
            add_temporary!(@mul $result, small_power as Limb, $value);
            // Reset the accumulator for the next batch of digits.
            $counter = 0;
            $value = 0;
        }
    };
    // Add a temporary where we won't read the counter results internally.
    //
    // # Safety
    //
    // Safe is `counter <= step`, or smaller than the table size.
    (@end $format:ident, $result:ident, $counter:ident, $value:ident) => {
        if $counter!= 0 {
            // SAFETY: safe, since `counter <= step`, or smaller than the table size.
            let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
            add_temporary!(@mul $result, small_power as Limb, $value);
        }
    };
    // Add the maximum native value.
    // Used when the accumulator holds exactly `step` digits, so the
    // pre-computed `$max` (10^step) avoids recomputing the power.
    (@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => {
        add_temporary!(@mul $result, $max, $value);
        $counter = 0;
        $value = 0;
    };
}
/// Round-up a truncated value.
///
/// Appends a `1` digit (multiply by 10, add 1) rather than incrementing,
/// so a truncated value can never be bumped onto an exact halfway point,
/// which would corrupt the later halfway comparison.
macro_rules! round_up_truncated {
    ($format:ident, $result:ident, $count:ident) => {{
        // Need to round-up.
        // Can't just add 1, since this can accidentally round-up
        // values to a halfway point, which can cause invalid results.
        add_temporary!(@mul $result, 10, 1);
        $count += 1;
    }};
}
/// Check and round-up the fraction if any non-zero digits exist.
///
/// Scans the remaining (truncated) digits; on the first non-zero digit the
/// mantissa is rounded up via `round_up_truncated!` and the result is
/// returned immediately. NOTE: the `return` exits the *enclosing function*
/// (`parse_mantissa`), not merely the macro expansion.
macro_rules! round_up_nonzero {
    ($format:ident, $iter:expr, $result:ident, $count:ident) => {{
        for &digit in $iter {
            if digit!= b'0' {
                round_up_truncated!($format, $result, $count);
                return ($result, $count);
            }
        }
    }};
}
/// Parse the full mantissa into a big integer.
///
/// Returns the parsed mantissa and the number of digits in the mantissa.
/// The max digits is the maximum number of digits plus one.
///
/// Digits are batched into a native-integer accumulator (`value`) and
/// flushed into the big integer every `step` digits, minimizing expensive
/// big-integer operations. If `max_digits` is reached, any remaining
/// non-zero digits cause a round-up of the truncated mantissa.
pub fn parse_mantissa<'a, Iter1, Iter2>(
    mut integer: Iter1,
    mut fraction: Iter2,
    max_digits: usize,
) -> (Bigint, usize)
where
    Iter1: Iterator<Item = &'a u8> + Clone,
    Iter2: Iterator<Item = &'a u8> + Clone,
{
    // Iteratively process all the data in the mantissa.
    // We do this via small, intermediate values which once we reach
    // the maximum number of digits we can process without overflow,
    // we add the temporary to the big integer.
    let mut counter: usize = 0; // digits in `value` since last flush
    let mut count: usize = 0; // total significant digits parsed
    let mut value: Limb = 0; // native accumulator for up to `step` digits
    let mut result = Bigint::new();
    // Now use our pre-computed small powers iteratively.
    // This is calculated as `⌊log(2^BITS - 1, 10)⌋`.
    let step: usize = if LIMB_BITS == 32 {
        9
    } else {
        19
    };
    let max_native = (10 as Limb).pow(step as u32);
    // Process the integer digits.
    'integer: loop {
        // Parse a digit at a time, until we reach step.
        while counter < step && count < max_digits {
            if let Some(&c) = integer.next() {
                add_digit!(c, value, counter, count);
            } else {
                break 'integer;
            }
        }
        // Check if we've exhausted our max digits.
        if count == max_digits {
            // Need to check if we're truncated, and round-up accordingly.
            // SAFETY: safe since `counter <= step`.
            add_temporary!(@end format, result, counter, value);
            // These may `return` early if a non-zero truncated digit exists.
            round_up_nonzero!(format, integer, result, count);
            round_up_nonzero!(format, fraction, result, count);
            return (result, count);
        } else {
            // Add our temporary from the loop.
            // SAFETY: safe since `counter <= step`.
            add_temporary!(@max format, result, counter, value, max_native);
        }
    }
    // Skip leading fraction zeros.
    // Required to get an accurate count.
    if count == 0 {
        for &c in &mut fraction {
            if c!= b'0' {
                // First significant digit found; fall through to the
                // main fraction loop for the rest.
                add_digit!(c, value, counter, count);
                break;
            }
        }
    }
    // Process the fraction digits.
    'fraction: loop {
        // Parse a digit at a time, until we reach step.
        while counter < step && count < max_digits {
            if let Some(&c) = fraction.next() {
                add_digit!(c, value, counter, count);
            } else {
                break 'fraction;
            }
        }
        // Check if we've exhausted our max digits.
        if count == max_digits {
            // SAFETY: safe since `counter <= step`.
            add_temporary!(@end format, result, counter, value);
            round_up_nonzero!(format, fraction, result, count);
            return (result, count);
        } else {
            // Add our temporary from the loop.
            // SAFETY: safe since `counter <= step`.
            add_temporary!(@max format, result, counter, value, max_native);
        }
    }
    // We will always have a remainder, as long as we entered the loop
    // once, or counter % step is 0.
    // SAFETY: safe since `counter <= step`.
    add_temporary!(@end format, result, counter, value);
    (result, count)
}
// SCALING
// -------
/// Calculate the scientific exponent from a `Number` value.
/// Any other attempts would require slowdowns for faster algorithms.
#[inline]
pub fn scientific_exponent(num: &Number) -> i32 {
    // Use power reduction to make this faster: strip decimal digits off
    // the mantissa in large steps first, then progressively smaller ones,
    // accumulating the implied decimal exponent as we go.
    let mut mantissa = num.mantissa;
    let mut exponent = num.exponent;
    for &(divisor, shift) in &[(10000, 4), (100, 2), (10, 1)] {
        while mantissa >= divisor {
            mantissa /= divisor;
            exponent += shift;
        }
    }
    exponent as i32
}
/// Calculate `b` from a representation of `b` as a float.
///
/// `b` is simply the raw significand/exponent pair taken directly from
/// the machine float, with no normalization applied.
#[inline]
pub fn b<F: Float>(float: F) -> ExtendedFloat {
    ExtendedFloat {
        mant: float.mantissa(),
        exp: float.exponent(),
    }
}
/// Calculate `b+h` from a representation of `b` as a float.
///
/// Shifts the mantissa up one bit and sets the new low bit while
/// decrementing the exponent, yielding `b` plus half a ULP — the halfway
/// point used to decide rounding direction.
#[inline]
pub fn bh<F: Float>(float: F) -> ExtendedFloat {
    let fp = b(float);
    ExtendedFloat {
        mant: (fp.mant << 1) + 1,
        exp: fp.exp - 1,
    }
}
|
positive_digit_comp
|
identifier_name
|
slow.rs
|
//! Slow, fallback cases where we cannot unambiguously round a float.
//!
//! This occurs when we cannot determine the exact representation using
//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms,
//! and therefore must fallback to a slow, arbitrary-precision representation.
#![doc(hidden)]
use crate::bigint::{Bigint, Limb, LIMB_BITS};
use crate::extended_float::{extended_to_float, ExtendedFloat};
use crate::num::Float;
use crate::number::Number;
use crate::rounding::{round, round_down, round_nearest_tie_even};
use core::cmp;
// ALGORITHM
// ---------
/// Parse the significant digits and biased, binary exponent of a float.
///
/// This is a fallback algorithm that uses a big-integer representation
/// of the float, and therefore is considerably slower than faster
/// approximations. However, it will always determine how to round
/// the significant digits to the nearest machine float, allowing
/// us to handle near half-way cases.
///
/// Near half-way cases are halfway between two consecutive machine floats.
/// For example, the float `16777217.0` has a bitwise representation of
/// `100000000000000000000000 1`. Rounding to a single-precision float,
/// the trailing `1` is truncated. Using round-nearest, tie-even, any
/// value above `16777217.0` must be rounded up to `16777218.0`, while
/// any value before or equal to `16777217.0` must be rounded down
/// to `16777216.0`. These near-halfway conversions therefore may require
/// a large number of digits to unambiguously determine how to round.
///
/// `num` carries the parsed mantissa/exponent, `fp` the initial
/// extended-float estimate (mantissa already shifted to the high bit),
/// and `integer`/`fraction` iterate the raw digit bytes.
#[inline]
pub fn slow<'a, F, Iter1, Iter2>(
    num: Number,
    fp: ExtendedFloat,
    integer: Iter1,
    fraction: Iter2,
) -> ExtendedFloat
where
    F: Float,
    Iter1: Iterator<Item = &'a u8> + Clone,
    Iter2: Iterator<Item = &'a u8> + Clone,
{
    // Ensure our preconditions are valid:
    // 1. The significant digits are not shifted into place.
    debug_assert!(fp.mant & (1 << 63)!= 0);
    // This assumes the sign bit has already been parsed, and we're
    // starting with the integer digits, and the float format has been
    // correctly validated.
    let sci_exp = scientific_exponent(&num);
    // We have 2 major algorithms we use for this:
    // 1. An algorithm with a finite number of digits and a positive exponent.
    // 2. An algorithm with a finite number of digits and a negative exponent.
    let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS);
    // Decimal exponent of the first digit, adjusted by the digit count.
    let exponent = sci_exp + 1 - digits as i32;
    if exponent >= 0 {
        positive_digit_comp::<F>(bigmant, exponent)
    } else {
        negative_digit_comp::<F>(bigmant, fp, exponent)
    }
}
/// Generate the significant digits with a positive exponent relative to mantissa.
pub fn positive_digit_comp<F: Float>(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat {
// Simple, we just need to multiply by the power of the radix.
// Now, we can calculate the mantissa and the exponent from this.
// The binary exponent is the binary exponent for the mantissa
// shifted to the hidden bit.
bigmant.pow(10, exponent as u32).unwrap();
// Get the exact representation of the float from the big integer.
// hi64 checks **all** the remaining bits after the mantissa,
// so it will check if **any** truncated digits exist.
let (mant, is_truncated) = bigmant.hi64();
let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS;
|
exp,
};
// Shift the digits into position and determine if we need to round-up.
round::<F, _>(&mut fp, |f, s| {
round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| {
is_above || (is_halfway && is_truncated) || (is_odd && is_halfway)
});
});
fp
}
/// Generate the significant digits with a negative exponent relative to mantissa.
///
/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`,
/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix
/// exponent. We then calculate the theoretical representation of `b+h`, which
/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary
/// exponent. If we had infinite, efficient floating precision, this would be
/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`.
///
/// Since we cannot divide and keep precision, we must multiply the other:
/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do
/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show an example
/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove
/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if
/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise,
/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents
/// are all positive.
///
/// This allows us to compare both floats using integers efficiently
/// without any loss of precision.
#[allow(clippy::comparison_chain)]
pub fn negative_digit_comp<F: Float>(
    bigmant: Bigint,
    mut fp: ExtendedFloat,
    exponent: i32,
) -> ExtendedFloat {
    // Ensure our preconditions are valid:
    // 1. The significant digits are not shifted into place.
    debug_assert!(fp.mant & (1 << 63)!= 0);
    // Get the significant digits and radix exponent for the real digits.
    let mut real_digits = bigmant;
    let real_exp = exponent;
    debug_assert!(real_exp < 0);
    // Round down our extended-precision float and calculate `b`.
    let mut b = fp;
    round::<F, _>(&mut b, round_down);
    let b = extended_to_float::<F>(b);
    // Get the significant digits and the binary exponent for `b+h`
    // (the halfway point to the next-up float).
    let theor = bh(b);
    let mut theor_digits = Bigint::from_u64(theor.mant);
    let theor_exp = theor.exp;
    // We need to scale the real digits and `b+h` digits to be the same
    // order. We currently have `real_exp`, in `radix`, that needs to be
    // shifted to `theor_digits` (since it is negative), and `theor_exp`
    // to either `theor_digits` or `real_digits` as a power of 2 (since it
    // may be positive or negative). Try to remove as many powers of 2
    // as possible. All values are relative to `theor_digits`, that is,
    // reflect the power you need to multiply `theor_digits` by.
    //
    // Both are on opposite-sides of equation, can factor out a
    // power of two.
    //
    // Example: 10^-10, 2^-10 -> ( 0, 10, 0)
    // Example: 10^-10, 2^-15 -> (-5, 10, 0)
    // Example: 10^-10, 2^-5  -> ( 5, 10, 0)
    // Example: 10^-10, 2^5   -> (15, 10, 0)
    let binary_exp = theor_exp - real_exp;
    let halfradix_exp = -real_exp;
    // Scale the theoretical digits by 5^-N1 (the odd part of 10^-N1).
    if halfradix_exp!= 0 {
        theor_digits.pow(5, halfradix_exp as u32).unwrap();
    }
    // Distribute the remaining power of two to whichever side keeps
    // both exponents non-negative.
    if binary_exp > 0 {
        theor_digits.pow(2, binary_exp as u32).unwrap();
    } else if binary_exp < 0 {
        real_digits.pow(2, (-binary_exp) as u32).unwrap();
    }
    // Compare our theoretical and real digits and round nearest, tie even.
    let ord = real_digits.data.cmp(&theor_digits.data);
    round::<F, _>(&mut fp, |f, s| {
        round_nearest_tie_even(f, s, |is_odd, _, _| {
            // Can ignore `is_halfway` and `is_above`, since those were
            // calculated using less significant digits.
            match ord {
                // Above the halfway point: round up.
                cmp::Ordering::Greater => true,
                // Below the halfway point: round down.
                cmp::Ordering::Less => false,
                // Exactly halfway: round to even mantissa.
                cmp::Ordering::Equal if is_odd => true,
                cmp::Ordering::Equal => false,
            }
        });
    });
    fp
}
/// Add a digit to the temporary value.
macro_rules! add_digit {
($c:ident, $value:ident, $counter:ident, $count:ident) => {{
let digit = $c - b'0';
$value *= 10 as Limb;
$value += digit as Limb;
// Increment our counters.
$counter += 1;
$count += 1;
}};
}
/// Add a temporary value to our mantissa.
macro_rules! add_temporary {
// Multiply by the small power and add the native value.
(@mul $result:ident, $power:expr, $value:expr) => {
$result.data.mul_small($power).unwrap();
$result.data.add_small($value).unwrap();
};
// # Safety
//
// Safe is `counter <= step`, or smaller than the table size.
($format:ident, $result:ident, $counter:ident, $value:ident) => {
if $counter!= 0 {
// SAFETY: safe, since `counter <= step`, or smaller than the table size.
let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
add_temporary!(@mul $result, small_power as Limb, $value);
$counter = 0;
$value = 0;
}
};
// Add a temporary where we won't read the counter results internally.
//
// # Safety
//
// Safe is `counter <= step`, or smaller than the table size.
(@end $format:ident, $result:ident, $counter:ident, $value:ident) => {
if $counter!= 0 {
// SAFETY: safe, since `counter <= step`, or smaller than the table size.
let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
add_temporary!(@mul $result, small_power as Limb, $value);
}
};
// Add the maximum native value.
(@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => {
add_temporary!(@mul $result, $max, $value);
$counter = 0;
$value = 0;
};
}
/// Round-up a truncated value.
macro_rules! round_up_truncated {
($format:ident, $result:ident, $count:ident) => {{
// Need to round-up.
// Can't just add 1, since this can accidentally round-up
// values to a halfway point, which can cause invalid results.
add_temporary!(@mul $result, 10, 1);
$count += 1;
}};
}
/// Check and round-up the fraction if any non-zero digits exist.
macro_rules! round_up_nonzero {
($format:ident, $iter:expr, $result:ident, $count:ident) => {{
for &digit in $iter {
if digit!= b'0' {
round_up_truncated!($format, $result, $count);
return ($result, $count);
}
}
}};
}
/// Parse the full mantissa into a big integer.
///
/// Returns the parsed mantissa and the number of digits in the mantissa.
/// The max digits is the maximum number of digits plus one.
pub fn parse_mantissa<'a, Iter1, Iter2>(
mut integer: Iter1,
mut fraction: Iter2,
max_digits: usize,
) -> (Bigint, usize)
where
Iter1: Iterator<Item = &'a u8> + Clone,
Iter2: Iterator<Item = &'a u8> + Clone,
{
// Iteratively process all the data in the mantissa.
// We do this via small, intermediate values which once we reach
// the maximum number of digits we can process without overflow,
// we add the temporary to the big integer.
let mut counter: usize = 0;
let mut count: usize = 0;
let mut value: Limb = 0;
let mut result = Bigint::new();
// Now use our pre-computed small powers iteratively.
// This is calculated as `⌊log(2^BITS - 1, 10)⌋`.
let step: usize = if LIMB_BITS == 32 {
9
} else {
19
};
let max_native = (10 as Limb).pow(step as u32);
// Process the integer digits.
'integer: loop {
// Parse a digit at a time, until we reach step.
while counter < step && count < max_digits {
if let Some(&c) = integer.next() {
add_digit!(c, value, counter, count);
} else {
break 'integer;
}
}
// Check if we've exhausted our max digits.
if count == max_digits {
// Need to check if we're truncated, and round-up accordingly.
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
round_up_nonzero!(format, integer, result, count);
round_up_nonzero!(format, fraction, result, count);
return (result, count);
} else {
// Add our temporary from the loop.
// SAFETY: safe since `counter <= step`.
add_temporary!(@max format, result, counter, value, max_native);
}
}
// Skip leading fraction zeros.
// Required to get an accurate count.
if count == 0 {
for &c in &mut fraction {
if c!= b'0' {
add_digit!(c, value, counter, count);
break;
}
}
}
// Process the fraction digits.
'fraction: loop {
// Parse a digit at a time, until we reach step.
while counter < step && count < max_digits {
if let Some(&c) = fraction.next() {
add_digit!(c, value, counter, count);
} else {
break 'fraction;
}
}
// Check if we've exhausted our max digits.
if count == max_digits {
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
round_up_nonzero!(format, fraction, result, count);
return (result, count);
} else {
// Add our temporary from the loop.
// SAFETY: safe since `counter <= step`.
add_temporary!(@max format, result, counter, value, max_native);
}
}
// We will always have a remainder, as long as we entered the loop
// once, or counter % step is 0.
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
(result, count)
}
// SCALING
// -------
/// Calculate the scientific exponent from a `Number` value.
/// Any other attempts would require slowdowns for faster algorithms.
#[inline]
pub fn scientific_exponent(num: &Number) -> i32 {
// Use power reduction to make this faster.
let mut mantissa = num.mantissa;
let mut exponent = num.exponent;
while mantissa >= 10000 {
mantissa /= 10000;
exponent += 4;
}
while mantissa >= 100 {
mantissa /= 100;
exponent += 2;
}
while mantissa >= 10 {
mantissa /= 10;
exponent += 1;
}
exponent as i32
}
/// Calculate `b` from a a representation of `b` as a float.
#[inline]
pub fn b<F: Float>(float: F) -> ExtendedFloat {
ExtendedFloat {
mant: float.mantissa(),
exp: float.exponent(),
}
}
/// Calculate `b+h` from a a representation of `b` as a float.
#[inline]
pub fn bh<F: Float>(float: F) -> ExtendedFloat {
let fp = b(float);
ExtendedFloat {
mant: (fp.mant << 1) + 1,
exp: fp.exp - 1,
}
}
|
let mut fp = ExtendedFloat {
mant,
|
random_line_split
|
slow.rs
|
//! Slow, fallback cases where we cannot unambiguously round a float.
//!
//! This occurs when we cannot determine the exact representation using
//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms,
//! and therefore must fallback to a slow, arbitrary-precision representation.
#![doc(hidden)]
use crate::bigint::{Bigint, Limb, LIMB_BITS};
use crate::extended_float::{extended_to_float, ExtendedFloat};
use crate::num::Float;
use crate::number::Number;
use crate::rounding::{round, round_down, round_nearest_tie_even};
use core::cmp;
// ALGORITHM
// ---------
/// Parse the significant digits and biased, binary exponent of a float.
///
/// This is a fallback algorithm that uses a big-integer representation
/// of the float, and therefore is considerably slower than faster
/// approximations. However, it will always determine how to round
/// the significant digits to the nearest machine float, allowing
/// use to handle near half-way cases.
///
/// Near half-way cases are halfway between two consecutive machine floats.
/// For example, the float `16777217.0` has a bitwise representation of
/// `100000000000000000000000 1`. Rounding to a single-precision float,
/// the trailing `1` is truncated. Using round-nearest, tie-even, any
/// value above `16777217.0` must be rounded up to `16777218.0`, while
/// any value before or equal to `16777217.0` must be rounded down
/// to `16777216.0`. These near-halfway conversions therefore may require
/// a large number of digits to unambiguously determine how to round.
#[inline]
pub fn slow<'a, F, Iter1, Iter2>(
num: Number,
fp: ExtendedFloat,
integer: Iter1,
fraction: Iter2,
) -> ExtendedFloat
where
F: Float,
Iter1: Iterator<Item = &'a u8> + Clone,
Iter2: Iterator<Item = &'a u8> + Clone,
{
// Ensure our preconditions are valid:
// 1. The significant digits are not shifted into place.
debug_assert!(fp.mant & (1 << 63)!= 0);
// This assumes the sign bit has already been parsed, and we're
// starting with the integer digits, and the float format has been
// correctly validated.
let sci_exp = scientific_exponent(&num);
// We have 2 major algorithms we use for this:
// 1. An algorithm with a finite number of digits and a positive exponent.
// 2. An algorithm with a finite number of digits and a negative exponent.
let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS);
let exponent = sci_exp + 1 - digits as i32;
if exponent >= 0 {
positive_digit_comp::<F>(bigmant, exponent)
} else {
negative_digit_comp::<F>(bigmant, fp, exponent)
}
}
/// Generate the significant digits with a positive exponent relative to mantissa.
pub fn positive_digit_comp<F: Float>(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat
|
is_above || (is_halfway && is_truncated) || (is_odd && is_halfway)
});
});
fp
}
/// Generate the significant digits with a negative exponent relative to mantissa.
///
/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`,
/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix
/// exponent. We then calculate the theoretical representation of `b+h`, which
/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary
/// exponent. If we had infinite, efficient floating precision, this would be
/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`.
///
/// Since we cannot divide and keep precision, we must multiply the other:
/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do
/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show and example
/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove
/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if
/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise,
/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents
/// are all positive.
///
/// This allows us to compare both floats using integers efficiently
/// without any loss of precision.
#[allow(clippy::comparison_chain)]
pub fn negative_digit_comp<F: Float>(
bigmant: Bigint,
mut fp: ExtendedFloat,
exponent: i32,
) -> ExtendedFloat {
// Ensure our preconditions are valid:
// 1. The significant digits are not shifted into place.
debug_assert!(fp.mant & (1 << 63)!= 0);
// Get the significant digits and radix exponent for the real digits.
let mut real_digits = bigmant;
let real_exp = exponent;
debug_assert!(real_exp < 0);
// Round down our extended-precision float and calculate `b`.
let mut b = fp;
round::<F, _>(&mut b, round_down);
let b = extended_to_float::<F>(b);
// Get the significant digits and the binary exponent for `b+h`.
let theor = bh(b);
let mut theor_digits = Bigint::from_u64(theor.mant);
let theor_exp = theor.exp;
// We need to scale the real digits and `b+h` digits to be the same
// order. We currently have `real_exp`, in `radix`, that needs to be
// shifted to `theor_digits` (since it is negative), and `theor_exp`
// to either `theor_digits` or `real_digits` as a power of 2 (since it
// may be positive or negative). Try to remove as many powers of 2
// as possible. All values are relative to `theor_digits`, that is,
// reflect the power you need to multiply `theor_digits` by.
//
// Both are on opposite-sides of equation, can factor out a
// power of two.
//
// Example: 10^-10, 2^-10 -> ( 0, 10, 0)
// Example: 10^-10, 2^-15 -> (-5, 10, 0)
// Example: 10^-10, 2^-5 -> ( 5, 10, 0)
// Example: 10^-10, 2^5 -> (15, 10, 0)
let binary_exp = theor_exp - real_exp;
let halfradix_exp = -real_exp;
if halfradix_exp!= 0 {
theor_digits.pow(5, halfradix_exp as u32).unwrap();
}
if binary_exp > 0 {
theor_digits.pow(2, binary_exp as u32).unwrap();
} else if binary_exp < 0 {
real_digits.pow(2, (-binary_exp) as u32).unwrap();
}
// Compare our theoretical and real digits and round nearest, tie even.
let ord = real_digits.data.cmp(&theor_digits.data);
round::<F, _>(&mut fp, |f, s| {
round_nearest_tie_even(f, s, |is_odd, _, _| {
// Can ignore `is_halfway` and `is_above`, since those were
// calculates using less significant digits.
match ord {
cmp::Ordering::Greater => true,
cmp::Ordering::Less => false,
cmp::Ordering::Equal if is_odd => true,
cmp::Ordering::Equal => false,
}
});
});
fp
}
/// Add a digit to the temporary value.
macro_rules! add_digit {
($c:ident, $value:ident, $counter:ident, $count:ident) => {{
let digit = $c - b'0';
$value *= 10 as Limb;
$value += digit as Limb;
// Increment our counters.
$counter += 1;
$count += 1;
}};
}
/// Add a temporary value to our mantissa.
macro_rules! add_temporary {
// Multiply by the small power and add the native value.
(@mul $result:ident, $power:expr, $value:expr) => {
$result.data.mul_small($power).unwrap();
$result.data.add_small($value).unwrap();
};
// # Safety
//
// Safe is `counter <= step`, or smaller than the table size.
($format:ident, $result:ident, $counter:ident, $value:ident) => {
if $counter!= 0 {
// SAFETY: safe, since `counter <= step`, or smaller than the table size.
let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
add_temporary!(@mul $result, small_power as Limb, $value);
$counter = 0;
$value = 0;
}
};
// Add a temporary where we won't read the counter results internally.
//
// # Safety
//
// Safe is `counter <= step`, or smaller than the table size.
(@end $format:ident, $result:ident, $counter:ident, $value:ident) => {
if $counter!= 0 {
// SAFETY: safe, since `counter <= step`, or smaller than the table size.
let small_power = unsafe { f64::int_pow_fast_path($counter, 10) };
add_temporary!(@mul $result, small_power as Limb, $value);
}
};
// Add the maximum native value.
(@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => {
add_temporary!(@mul $result, $max, $value);
$counter = 0;
$value = 0;
};
}
/// Round-up a truncated value.
macro_rules! round_up_truncated {
($format:ident, $result:ident, $count:ident) => {{
// Need to round-up.
// Can't just add 1, since this can accidentally round-up
// values to a halfway point, which can cause invalid results.
add_temporary!(@mul $result, 10, 1);
$count += 1;
}};
}
/// Check and round-up the fraction if any non-zero digits exist.
macro_rules! round_up_nonzero {
($format:ident, $iter:expr, $result:ident, $count:ident) => {{
for &digit in $iter {
if digit!= b'0' {
round_up_truncated!($format, $result, $count);
return ($result, $count);
}
}
}};
}
/// Parse the full mantissa into a big integer.
///
/// Returns the parsed mantissa and the number of digits in the mantissa.
/// The max digits is the maximum number of digits plus one.
pub fn parse_mantissa<'a, Iter1, Iter2>(
mut integer: Iter1,
mut fraction: Iter2,
max_digits: usize,
) -> (Bigint, usize)
where
Iter1: Iterator<Item = &'a u8> + Clone,
Iter2: Iterator<Item = &'a u8> + Clone,
{
// Iteratively process all the data in the mantissa.
// We do this via small, intermediate values which once we reach
// the maximum number of digits we can process without overflow,
// we add the temporary to the big integer.
let mut counter: usize = 0;
let mut count: usize = 0;
let mut value: Limb = 0;
let mut result = Bigint::new();
// Now use our pre-computed small powers iteratively.
// This is calculated as `⌊log(2^BITS - 1, 10)⌋`.
let step: usize = if LIMB_BITS == 32 {
9
} else {
19
};
let max_native = (10 as Limb).pow(step as u32);
// Process the integer digits.
'integer: loop {
// Parse a digit at a time, until we reach step.
while counter < step && count < max_digits {
if let Some(&c) = integer.next() {
add_digit!(c, value, counter, count);
} else {
break 'integer;
}
}
// Check if we've exhausted our max digits.
if count == max_digits {
// Need to check if we're truncated, and round-up accordingly.
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
round_up_nonzero!(format, integer, result, count);
round_up_nonzero!(format, fraction, result, count);
return (result, count);
} else {
// Add our temporary from the loop.
// SAFETY: safe since `counter <= step`.
add_temporary!(@max format, result, counter, value, max_native);
}
}
// Skip leading fraction zeros.
// Required to get an accurate count.
if count == 0 {
for &c in &mut fraction {
if c!= b'0' {
add_digit!(c, value, counter, count);
break;
}
}
}
// Process the fraction digits.
'fraction: loop {
// Parse a digit at a time, until we reach step.
while counter < step && count < max_digits {
if let Some(&c) = fraction.next() {
add_digit!(c, value, counter, count);
} else {
break 'fraction;
}
}
// Check if we've exhausted our max digits.
if count == max_digits {
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
round_up_nonzero!(format, fraction, result, count);
return (result, count);
} else {
// Add our temporary from the loop.
// SAFETY: safe since `counter <= step`.
add_temporary!(@max format, result, counter, value, max_native);
}
}
// We will always have a remainder, as long as we entered the loop
// once, or counter % step is 0.
// SAFETY: safe since `counter <= step`.
add_temporary!(@end format, result, counter, value);
(result, count)
}
// SCALING
// -------
/// Calculate the scientific exponent from a `Number` value.
/// Any other attempts would require slowdowns for faster algorithms.
#[inline]
pub fn scientific_exponent(num: &Number) -> i32 {
// Use power reduction to make this faster.
let mut mantissa = num.mantissa;
let mut exponent = num.exponent;
while mantissa >= 10000 {
mantissa /= 10000;
exponent += 4;
}
while mantissa >= 100 {
mantissa /= 100;
exponent += 2;
}
while mantissa >= 10 {
mantissa /= 10;
exponent += 1;
}
exponent as i32
}
/// Calculate `b` from a a representation of `b` as a float.
#[inline]
pub fn b<F: Float>(float: F) -> ExtendedFloat {
ExtendedFloat {
mant: float.mantissa(),
exp: float.exponent(),
}
}
/// Calculate `b+h` from a a representation of `b` as a float.
#[inline]
pub fn bh<F: Float>(float: F) -> ExtendedFloat {
let fp = b(float);
ExtendedFloat {
mant: (fp.mant << 1) + 1,
exp: fp.exp - 1,
}
}
|
{
// Simple, we just need to multiply by the power of the radix.
// Now, we can calculate the mantissa and the exponent from this.
// The binary exponent is the binary exponent for the mantissa
// shifted to the hidden bit.
bigmant.pow(10, exponent as u32).unwrap();
// Get the exact representation of the float from the big integer.
// hi64 checks **all** the remaining bits after the mantissa,
// so it will check if **any** truncated digits exist.
let (mant, is_truncated) = bigmant.hi64();
let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS;
let mut fp = ExtendedFloat {
mant,
exp,
};
// Shift the digits into position and determine if we need to round-up.
round::<F, _>(&mut fp, |f, s| {
round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| {
|
identifier_body
|
specialization_graph.rs
|
StableHasherResult};
use traits;
use ty::{self, TyCtxt, TypeFoldable};
use ty::fast_reject::{self, SimplifiedType};
use rustc_data_structures::sync::Lrc;
use syntax::ast::Ident;
use util::captures::Captures;
use util::nodemap::{DefIdMap, FxHashMap};
/// A per-trait graph of impls in specialization order. At the moment, this
/// graph forms a tree rooted with the trait itself, with all other nodes
/// representing impls, and parent-child relationships representing
/// specializations.
///
/// The graph provides two key services:
///
/// - Construction. This implicitly checks for overlapping impls (i.e., impls
///   that overlap but where neither specializes the other -- an artifact of
///   the simple "chain" rule).
///
/// - Parent extraction. In particular, the graph can give you the *immediate*
///   parents of a given specializing impl, which is needed for extracting
///   default items amongst other things. In the simple "chain" rule, every impl
///   has at most one parent.
#[derive(RustcEncodable, RustcDecodable)]
pub struct Graph {
    // All impls have a parent; the "root" impls have as their parent the `def_id`
    // of the trait.
    parent: DefIdMap<DefId>,
    // The "root" impls are found by looking up the trait's def_id.
    children: DefIdMap<Children>,
}
/// Children of a given impl, grouped into blanket/non-blanket varieties as is
/// done in `TraitDef`.
#[derive(Default, RustcEncodable, RustcDecodable)]
struct Children {
    // Impls of a trait (or specializations of a given impl). To allow for
    // quicker lookup, the impls are indexed by a simplified version of their
    // `Self` type: impls with a simplifiable `Self` are stored in
    // `nonblanket_impls` keyed by it, while all other impls are stored in
    // `blanket_impls`.
    //
    // A similar division is used within `TraitDef`, but the lists there collect
    // together *all* the impls for a trait, and are populated prior to building
    // the specialization graph.
    /// Impls of the trait, bucketed by simplified `Self` type.
    nonblanket_impls: FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
    /// Blanket impls associated with the trait (no simplifiable `Self`).
    blanket_impls: Vec<DefId>,
}
/// The result of attempting to insert an impl into a group of children.
enum Inserted {
    /// The impl was inserted as a new child in this group of children.
    /// Carries an optional overlap to be reported as a lint rather than a
    /// hard error (see `last_lint` in `Children::insert`).
    BecameNewSibling(Option<OverlapError>),

    /// The impl should replace existing impls [X1, ..], because the impl specializes X1, X2, etc.
    ReplaceChildren(Vec<DefId>),

    /// The impl is a specialization of an existing child.
    ShouldRecurseOn(DefId),
}
impl<'a, 'gcx, 'tcx> Children {
    /// Insert an impl into this set of children without comparing to any existing impls.
    fn insert_blindly(&mut self,
                      tcx: TyCtxt<'a, 'gcx, 'tcx>,
                      impl_def_id: DefId) {
        let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
        // Bucket by the simplified `Self` type when possible; impls whose
        // `Self` does not simplify go into the flat blanket list.
        if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
            debug!("insert_blindly: impl_def_id={:?} sty={:?}", impl_def_id, sty);
            self.nonblanket_impls.entry(sty).or_default().push(impl_def_id)
        } else {
            debug!("insert_blindly: impl_def_id={:?} sty=None", impl_def_id);
            self.blanket_impls.push(impl_def_id)
        }
    }

    /// Remove an impl from this set of children. Used when replacing
    /// an impl with a parent. The impl must be present in the list of
    /// children already (the `unwrap`s below rely on that).
    fn remove_existing(&mut self,
                       tcx: TyCtxt<'a, 'gcx, 'tcx>,
                       impl_def_id: DefId) {
        let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
        let vec: &mut Vec<DefId>;
        // Mirror `insert_blindly`'s bucketing to locate the list that
        // holds this impl.
        if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
            debug!("remove_existing: impl_def_id={:?} sty={:?}", impl_def_id, sty);
            vec = self.nonblanket_impls.get_mut(&sty).unwrap();
        } else {
            debug!("remove_existing: impl_def_id={:?} sty=None", impl_def_id);
            vec = &mut self.blanket_impls;
        }
        let index = vec.iter().position(|d| *d == impl_def_id).unwrap();
        vec.remove(index);
    }

    /// Attempt to insert an impl into this set of children, while comparing for
    /// specialization relationships.
    ///
    /// Returns the action the caller (`Graph::insert`) should take, or an
    /// `OverlapError` if the new impl overlaps an existing sibling without
    /// either impl specializing the other.
    fn insert(&mut self,
              tcx: TyCtxt<'a, 'gcx, 'tcx>,
              impl_def_id: DefId,
              simplified_self: Option<SimplifiedType>)
              -> Result<Inserted, OverlapError>
    {
        let mut last_lint = None;
        let mut replace_children = Vec::new();

        debug!(
            "insert(impl_def_id={:?}, simplified_self={:?})",
            impl_def_id,
            simplified_self,
        );

        // When the new impl's `Self` simplifies, only the matching bucket
        // (plus the blanket impls) can possibly overlap with it.
        let possible_siblings = match simplified_self {
            Some(sty) => PotentialSiblings::Filtered(self.filtered(sty)),
            None => PotentialSiblings::Unfiltered(self.iter()),
        };

        for possible_sibling in possible_siblings {
            debug!(
                "insert: impl_def_id={:?}, simplified_self={:?}, possible_sibling={:?}",
                impl_def_id,
                simplified_self,
                possible_sibling,
            );

            // Builds the error value reported when two impls overlap but
            // neither specializes the other.
            let overlap_error = |overlap: traits::coherence::OverlapResult<'_>| {
                // Found overlap, but no specialization; error out.
                let trait_ref = overlap.impl_header.trait_ref.unwrap();
                let self_ty = trait_ref.self_ty();
                OverlapError {
                    with_impl: possible_sibling,
                    trait_desc: trait_ref.to_string(),
                    // Only report the `Self` type if it has at least
                    // some outer concrete shell; otherwise, it's
                    // not adding much information.
                    self_desc: if self_ty.has_concrete_skeleton() {
                        Some(self_ty.to_string())
                    } else {
                        None
                    },
                    intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes,
                }
            };

            let tcx = tcx.global_tcx();
            // `le`: the new impl specializes `possible_sibling`;
            // `ge`: `possible_sibling` specializes the new impl.
            let (le, ge) = traits::overlapping_impls(
                tcx,
                possible_sibling,
                impl_def_id,
                traits::IntercrateMode::Issue43355,
                |overlap| {
                    if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
                        return Ok((false, false));
                    }

                    let le = tcx.specializes((impl_def_id, possible_sibling));
                    let ge = tcx.specializes((possible_sibling, impl_def_id));

                    if le == ge {
                        // Overlap with no specialization in exactly one
                        // direction: hard error.
                        Err(overlap_error(overlap))
                    } else {
                        Ok((le, ge))
                    }
                },
                || Ok((false, false)),
            )?;

            if le && !ge {
                debug!("descending as child of TraitRef {:?}",
                       tcx.impl_trait_ref(possible_sibling).unwrap());

                // The impl specializes `possible_sibling`.
                return Ok(Inserted::ShouldRecurseOn(possible_sibling));
            } else if ge && !le {
                debug!("placing as parent of TraitRef {:?}",
                       tcx.impl_trait_ref(possible_sibling).unwrap());

                // `possible_sibling` specializes the new impl, so the new
                // impl must become its parent; collect all such children.
                replace_children.push(possible_sibling);
            } else {
                if !tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
                    // Re-run the overlap check in `Fixed` intercrate mode;
                    // any overlap found here is recorded in `last_lint` and
                    // surfaced by the caller as a lint, not a hard error.
                    traits::overlapping_impls(
                        tcx,
                        possible_sibling,
                        impl_def_id,
                        traits::IntercrateMode::Fixed,
                        |overlap| last_lint = Some(overlap_error(overlap)),
                        || (),
                    );
                }

                // No overlap with this sibling (a hard error would already
                // have propagated via `?` above).
            }
        }

        if !replace_children.is_empty() {
            return Ok(Inserted::ReplaceChildren(replace_children));
        }

        // No overlap with any potential siblings, so add as a new sibling.
        debug!("placing as new sibling");
        self.insert_blindly(tcx, impl_def_id);
        Ok(Inserted::BecameNewSibling(last_lint))
    }

    /// Iterate over all child impls: blanket impls first, then every
    /// non-blanket bucket.
    fn iter(&mut self) -> impl Iterator<Item = DefId> + '_ {
        let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter());
        self.blanket_impls.iter().chain(nonblanket).cloned()
    }

    /// Iterate over the blanket impls plus only the non-blanket impls whose
    /// simplified `Self` type is `sty`.
    fn filtered(&mut self, sty: SimplifiedType) -> impl Iterator<Item = DefId> + '_ {
        let nonblanket = self.nonblanket_impls.entry(sty).or_default().iter();
        self.blanket_impls.iter().chain(nonblanket).cloned()
    }
}
// A custom iterator used by Children::insert.
//
// Unifies the two distinct concrete iterator types returned by
// `Children::filtered` and `Children::iter` behind a single type,
// without boxing.
enum PotentialSiblings<I, J>
    where I: Iterator<Item = DefId>,
          J: Iterator<Item = DefId>
{
    Unfiltered(I),
    Filtered(J)
}
impl<I, J> Iterator for PotentialSiblings<I, J>
    where I: Iterator<Item = DefId>,
          J: Iterator<Item = DefId>
{
    type Item = DefId;

    /// Delegate to whichever underlying iterator this wrapper holds.
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            PotentialSiblings::Unfiltered(it) => it.next(),
            PotentialSiblings::Filtered(it) => it.next(),
        }
    }
}
impl<'a, 'gcx, 'tcx> Graph {
    /// Creates an empty specialization graph.
    pub fn new() -> Graph {
        Graph {
            parent: Default::default(),
            children: Default::default(),
        }
    }

    /// Insert a local impl into the specialization graph. If an existing impl
    /// conflicts with it (has overlap, but neither specializes the other),
    /// information about the area of overlap is returned in the `Err`.
    ///
    /// On success, returns an optional `OverlapError` that the caller should
    /// report as a lint rather than a hard error.
    pub fn insert(&mut self,
                  tcx: TyCtxt<'a, 'gcx, 'tcx>,
                  impl_def_id: DefId)
                  -> Result<Option<OverlapError>, OverlapError> {
        assert!(impl_def_id.is_local());

        let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
        let trait_def_id = trait_ref.def_id;

        debug!("insert({:?}): inserting TraitRef {:?} into specialization graph",
               impl_def_id, trait_ref);

        // If the reference itself contains an earlier error (e.g., due to a
        // resolution failure), then we just insert the impl at the top level of
        // the graph and claim that there's no overlap (in order to suppress
        // bogus errors).
        if trait_ref.references_error() {
            debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \
                    impl_def_id={:?}, trait_def_id={:?}",
                   trait_ref, impl_def_id, trait_def_id);

            self.parent.insert(impl_def_id, trait_def_id);
            self.children.entry(trait_def_id).or_default()
                .insert_blindly(tcx, impl_def_id);
            return Ok(None);
        }

        let mut parent = trait_def_id;
        let mut last_lint = None;
        let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false);

        // Descend the specialization tree, where `parent` is the current parent node.
        loop {
            use self::Inserted::*;

            let insert_result = self.children.entry(parent).or_default()
                .insert(tcx, impl_def_id, simplified)?;

            match insert_result {
                BecameNewSibling(opt_lint) => {
                    last_lint = opt_lint;
                    break;
                }
                ReplaceChildren(grand_children_to_be) => {
                    // We currently have
                    //
                    //     P
                    //     |
                    //     G
                    //
                    // and we are inserting the impl N. We want to make it:
                    //
                    //     P
                    //     |
                    //     N
                    //     |
                    //     G

                    // Adjust P's list of children: remove G and then add N.
                    {
                        let siblings = self.children
                            .get_mut(&parent)
                            .unwrap();
                        for &grand_child_to_be in &grand_children_to_be {
                            siblings.remove_existing(tcx, grand_child_to_be);
                        }
                        siblings.insert_blindly(tcx, impl_def_id);
                    }

                    // Set G's parent to N and N's parent to P.
                    for &grand_child_to_be in &grand_children_to_be {
                        self.parent.insert(grand_child_to_be, impl_def_id);
                    }
                    self.parent.insert(impl_def_id, parent);

                    // Add G as N's child.
                    for &grand_child_to_be in &grand_children_to_be {
                        self.children.entry(impl_def_id).or_default()
                            .insert_blindly(tcx, grand_child_to_be);
                    }
                    break;
                }
                ShouldRecurseOn(new_parent) => {
                    // The new impl specializes `new_parent`; keep descending.
                    parent = new_parent;
                }
            }
        }

        // Record the final parent chosen by the descent above.
        self.parent.insert(impl_def_id, parent);
        Ok(last_lint)
    }

    /// Insert cached metadata mapping from a child impl back to its parent.
    ///
    /// Used for edges loaded from the crate store rather than built locally;
    /// it is a compiler bug for the child to already have a recorded parent.
    pub fn record_impl_from_cstore(&mut self,
                                   tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                   parent: DefId,
                                   child: DefId) {
        if self.parent.insert(child, parent).is_some() {
            bug!("When recording an impl from the crate store, information about its parent \
                  was already present.");
        }

        self.children.entry(parent).or_default().insert_blindly(tcx, child);
    }

    /// The parent of a given impl, which is the def id of the trait when the
    /// impl is a "specialization root".
    ///
    /// Panics if `child` is not present in the graph.
    pub fn parent(&self, child: DefId) -> DefId {
        *self.parent.get(&child).unwrap()
    }
}
/// A node in the specialization graph is either an impl or a trait
/// definition; either can serve as a source of item definitions.
/// There is always exactly one trait definition node: the root.
#[derive(Debug, Copy, Clone)]
pub enum
|
{
Impl(DefId),
Trait(DefId),
}
impl<'a, 'gcx, 'tcx> Node {
    /// Returns `true` for the trait-definition (root) node, `false` for impls.
    pub fn is_from_trait(&self) -> bool {
        if let Node::Trait(..) = *self { true } else { false }
    }

    /// Iterate over the items defined directly by the given (impl or trait) node.
    pub fn items(
        &self,
        tcx: TyCtxt<'a, 'gcx, 'tcx>,
    ) -> ty::AssociatedItemsIterator<'a, 'gcx, 'tcx> {
        tcx.associated_items(self.def_id())
    }

    /// The `DefId` of the impl or trait this node stands for.
    pub fn def_id(&self) -> DefId {
        match *self {
            Node::Impl(did) | Node::Trait(did) => did,
        }
    }
}
/// Iterator state for walking up the specialization ancestors of an impl
/// (see the free function `ancestors`). Yields the starting impl first,
/// then each parent, and finally the trait-definition root node.
pub struct Ancestors {
    // The trait whose graph is being walked; reaching this `DefId` as a
    // parent marks the root of the walk.
    trait_def_id: DefId,
    specialization_graph: Lrc<Graph>,
    // The next node to yield; `None` once the root has been yielded.
    current_source: Option<Node>,
}
impl Iterator for Ancestors {
    type Item = Node;

    /// Yield the current node, then advance to its parent. The walk stops
    /// after the trait-definition root has been yielded.
    fn next(&mut self) -> Option<Node> {
        let current = self.current_source.take();
        if let Some(Node::Impl(impl_did)) = current {
            let parent = self.specialization_graph.parent(impl_did);
            self.current_source = Some(if parent == self.trait_def_id {
                Node::Trait(parent)
            } else {
                Node::Impl(parent)
            });
        }
        current
    }
}
/// Pairs an item definition with the specialization-graph node
/// (impl or trait) in which it was found.
pub struct NodeItem<T> {
    pub node: Node,
    pub item: T,
}
impl<T> NodeItem<T> {
    /// Transform the carried item with `f`, preserving its originating node.
    pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> NodeItem<U> {
        let NodeItem { node, item } = self;
        NodeItem { node, item: f(item) }
    }
}
impl<'a, 'gcx, 'tcx> Ancestors {
    /// Search the items from the given ancestors, returning each definition
    /// with the given name and the given kind.
    ///
    /// Note that a `Type` request also matches `Existential` impl items.
    // FIXME(#35870): avoid closures being unexported due to `impl Trait`.
    #[inline]
    pub fn defs(
        self,
        tcx: TyCtxt<'a, 'gcx, 'tcx>,
        trait_item_name: Ident,
        trait_item_kind: ty::AssociatedKind,
        trait_def_id: DefId,
    ) -> impl Iterator<Item = NodeItem<ty::AssociatedItem>> + Captures<'gcx> + Captures<'tcx> + 'a {
        self.flat_map(move |node| {
            use ty::AssociatedKind::*;
            // Keep only items of a compatible kind whose name matches the
            // requested trait item under hygiene rules.
            node.items(tcx).filter(move |impl_item| match (trait_item_kind, impl_item.kind) {
                | (Const, Const)
                | (Method, Method)
                | (Type, Type)
                | (Type, Existential)
                => tcx.hygienic_eq(impl_item.ident, trait_item_name, trait_def_id),

                | (Const, _)
                | (Method, _)
                | (Type, _)
                | (Existential, _)
                => false,
            }).map(move |item| NodeItem { node: node, item: item })
        })
    }
}
/// Walk up the specialization ancestors of a given impl, starting with that
/// impl itself.
pub fn ancestors(tcx: TyCtxt<'_, '_, '_>,
|
Node
|
identifier_name
|
specialization_graph.rs
|
/// Children of a given impl, grouped into blanket/non-blanket varieties as is
/// done in `TraitDef`.
#[derive(Default, RustcEncodable, RustcDecodable)]
struct Children {
// Impls of a trait (or specializations of a given impl). To allow for
// quicker lookup, the impls are indexed by a simplified version of their
// `Self` type: impls with a simplifiable `Self` are stored in
// `nonblanket_impls` keyed by it, while all other impls are stored in
// `blanket_impls`.
//
// A similar division is used within `TraitDef`, but the lists there collect
// together *all* the impls for a trait, and are populated prior to building
// the specialization graph.
/// Impls of the trait.
nonblanket_impls: FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>,
/// Blanket impls associated with the trait.
blanket_impls: Vec<DefId>,
}
/// The result of attempting to insert an impl into a group of children.
enum Inserted {
/// The impl was inserted as a new child in this group of children.
BecameNewSibling(Option<OverlapError>),
/// The impl should replace existing impls [X1,..], because the impl specializes X1, X2, etc.
ReplaceChildren(Vec<DefId>),
/// The impl is a specialization of an existing child.
ShouldRecurseOn(DefId),
}
impl<'a, 'gcx, 'tcx> Children {
/// Insert an impl into this set of children without comparing to any existing impls.
fn insert_blindly(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId) {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
debug!("insert_blindly: impl_def_id={:?} sty={:?}", impl_def_id, sty);
self.nonblanket_impls.entry(sty).or_default().push(impl_def_id)
} else {
debug!("insert_blindly: impl_def_id={:?} sty=None", impl_def_id);
self.blanket_impls.push(impl_def_id)
}
}
/// Remove an impl from this set of children. Used when replacing
/// an impl with a parent. The impl must be present in the list of
/// children already.
fn remove_existing(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId) {
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let vec: &mut Vec<DefId>;
if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
debug!("remove_existing: impl_def_id={:?} sty={:?}", impl_def_id, sty);
vec = self.nonblanket_impls.get_mut(&sty).unwrap();
} else {
debug!("remove_existing: impl_def_id={:?} sty=None", impl_def_id);
vec = &mut self.blanket_impls;
}
let index = vec.iter().position(|d| *d == impl_def_id).unwrap();
vec.remove(index);
}
/// Attempt to insert an impl into this set of children, while comparing for
/// specialization relationships.
fn insert(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId,
simplified_self: Option<SimplifiedType>)
-> Result<Inserted, OverlapError>
{
let mut last_lint = None;
let mut replace_children = Vec::new();
debug!(
"insert(impl_def_id={:?}, simplified_self={:?})",
impl_def_id,
simplified_self,
);
let possible_siblings = match simplified_self {
Some(sty) => PotentialSiblings::Filtered(self.filtered(sty)),
None => PotentialSiblings::Unfiltered(self.iter()),
};
for possible_sibling in possible_siblings {
debug!(
"insert: impl_def_id={:?}, simplified_self={:?}, possible_sibling={:?}",
impl_def_id,
simplified_self,
possible_sibling,
);
let overlap_error = |overlap: traits::coherence::OverlapResult<'_>| {
// Found overlap, but no specialization; error out.
let trait_ref = overlap.impl_header.trait_ref.unwrap();
let self_ty = trait_ref.self_ty();
OverlapError {
with_impl: possible_sibling,
trait_desc: trait_ref.to_string(),
// Only report the `Self` type if it has at least
// some outer concrete shell; otherwise, it's
// not adding much information.
self_desc: if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
},
intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes,
}
};
let tcx = tcx.global_tcx();
let (le, ge) = traits::overlapping_impls(
tcx,
possible_sibling,
impl_def_id,
traits::IntercrateMode::Issue43355,
|overlap| {
if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
return Ok((false, false));
}
let le = tcx.specializes((impl_def_id, possible_sibling));
let ge = tcx.specializes((possible_sibling, impl_def_id));
if le == ge {
Err(overlap_error(overlap))
} else {
Ok((le, ge))
}
},
|| Ok((false, false)),
)?;
if le &&!ge {
debug!("descending as child of TraitRef {:?}",
tcx.impl_trait_ref(possible_sibling).unwrap());
// The impl specializes `possible_sibling`.
return Ok(Inserted::ShouldRecurseOn(possible_sibling));
} else if ge &&!le {
debug!("placing as parent of TraitRef {:?}",
tcx.impl_trait_ref(possible_sibling).unwrap());
replace_children.push(possible_sibling);
} else {
if!tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) {
traits::overlapping_impls(
tcx,
possible_sibling,
impl_def_id,
traits::IntercrateMode::Fixed,
|overlap| last_lint = Some(overlap_error(overlap)),
|| (),
);
}
// no overlap (error bailed already via?)
}
}
if!replace_children.is_empty() {
return Ok(Inserted::ReplaceChildren(replace_children));
}
// No overlap with any potential siblings, so add as a new sibling.
debug!("placing as new sibling");
self.insert_blindly(tcx, impl_def_id);
Ok(Inserted::BecameNewSibling(last_lint))
}
fn iter(&mut self) -> impl Iterator<Item = DefId> + '_ {
let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter());
self.blanket_impls.iter().chain(nonblanket).cloned()
}
fn filtered(&mut self, sty: SimplifiedType) -> impl Iterator<Item = DefId> + '_ {
let nonblanket = self.nonblanket_impls.entry(sty).or_default().iter();
self.blanket_impls.iter().chain(nonblanket).cloned()
}
}
// A custom iterator used by Children::insert
enum PotentialSiblings<I, J>
where I: Iterator<Item = DefId>,
J: Iterator<Item = DefId>
{
Unfiltered(I),
Filtered(J)
}
impl<I, J> Iterator for PotentialSiblings<I, J>
where I: Iterator<Item = DefId>,
J: Iterator<Item = DefId>
{
type Item = DefId;
fn next(&mut self) -> Option<Self::Item> {
match *self {
PotentialSiblings::Unfiltered(ref mut iter) => iter.next(),
PotentialSiblings::Filtered(ref mut iter) => iter.next()
}
}
}
impl<'a, 'gcx, 'tcx> Graph {
pub fn new() -> Graph {
Graph {
parent: Default::default(),
children: Default::default(),
}
}
/// Insert a local impl into the specialization graph. If an existing impl
/// conflicts with it (has overlap, but neither specializes the other),
/// information about the area of overlap is returned in the `Err`.
pub fn insert(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
impl_def_id: DefId)
-> Result<Option<OverlapError>, OverlapError> {
assert!(impl_def_id.is_local());
let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
let trait_def_id = trait_ref.def_id;
debug!("insert({:?}): inserting TraitRef {:?} into specialization graph",
impl_def_id, trait_ref);
// If the reference itself contains an earlier error (e.g., due to a
// resolution failure), then we just insert the impl at the top level of
// the graph and claim that there's no overlap (in order to suppress
// bogus errors).
if trait_ref.references_error() {
debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \
impl_def_id={:?}, trait_def_id={:?}",
trait_ref, impl_def_id, trait_def_id);
self.parent.insert(impl_def_id, trait_def_id);
self.children.entry(trait_def_id).or_default()
.insert_blindly(tcx, impl_def_id);
return Ok(None);
}
let mut parent = trait_def_id;
let mut last_lint = None;
let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false);
// Descend the specialization tree, where `parent` is the current parent node.
loop {
use self::Inserted::*;
let insert_result = self.children.entry(parent).or_default()
.insert(tcx, impl_def_id, simplified)?;
match insert_result {
BecameNewSibling(opt_lint) => {
last_lint = opt_lint;
break;
}
ReplaceChildren(grand_children_to_be) => {
// We currently have
//
// P
// |
// G
//
// and we are inserting the impl N. We want to make it:
//
// P
// |
// N
// |
// G
// Adjust P's list of children: remove G and then add N.
{
let siblings = self.children
.get_mut(&parent)
.unwrap();
for &grand_child_to_be in &grand_children_to_be {
siblings.remove_existing(tcx, grand_child_to_be);
}
siblings.insert_blindly(tcx, impl_def_id);
}
// Set G's parent to N and N's parent to P.
for &grand_child_to_be in &grand_children_to_be {
self.parent.insert(grand_child_to_be, impl_def_id);
}
self.parent.insert(impl_def_id, parent);
// Add G as N's child.
for &grand_child_to_be in &grand_children_to_be {
self.children.entry(impl_def_id).or_default()
.insert_blindly(tcx, grand_child_to_be);
}
break;
}
ShouldRecurseOn(new_parent) => {
parent = new_parent;
}
}
}
self.parent.insert(impl_def_id, parent);
Ok(last_lint)
}
/// Insert cached metadata mapping from a child impl back to its parent.
pub fn record_impl_from_cstore(&mut self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
parent: DefId,
child: DefId) {
if self.parent.insert(child, parent).is_some() {
bug!("When recording an impl from the crate store, information about its parent \
was already present.");
}
self.children.entry(parent).or_default().insert_blindly(tcx, child);
}
/// The parent of a given impl, which is the def id of the trait when the
/// impl is a "specialization root".
pub fn parent(&self, child: DefId) -> DefId {
*self.parent.get(&child).unwrap()
}
}
/// A node in the specialization graph is either an impl or a trait
/// definition; either can serve as a source of item definitions.
/// There is always exactly one trait definition node: the root.
#[derive(Debug, Copy, Clone)]
pub enum Node {
Impl(DefId),
Trait(DefId),
}
impl<'a, 'gcx, 'tcx> Node {
pub fn is_from_trait(&self) -> bool {
match *self {
Node::Trait(..) => true,
_ => false,
}
}
/// Iterate over the items defined directly by the given (impl or trait) node.
pub fn items(
&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
) -> ty::AssociatedItemsIterator<'a, 'gcx, 'tcx> {
tcx.associated_items(self.def_id())
}
pub fn def_id(&self) -> DefId {
match *self {
Node::Impl(did) => did,
Node::Trait(did) => did,
}
}
}
pub struct Ancestors {
trait_def_id: DefId,
specialization_graph: Lrc<Graph>,
current_source: Option<Node>,
}
impl Iterator for Ancestors {
type Item = Node;
fn next(&mut self) -> Option<Node> {
let cur = self.current_source.take();
if let Some(Node::Impl(cur_impl)) = cur {
let parent = self.specialization_graph.parent(cur_impl);
self.current_source = if parent == self.trait_def_id {
Some(Node::Trait(parent))
} else {
Some(Node::Impl(parent))
};
}
cur
}
}
pub struct NodeItem<T> {
pub node: Node,
pub item: T,
}
impl<T> NodeItem<T> {
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> NodeItem<U> {
NodeItem {
node: self.node,
item: f(self.item),
}
}
}
impl<'a, 'gcx, 'tcx> Ancestors {
/// Search the items from the given ancestors, returning each definition
/// with the given name and the given kind.
// FIXME(#35870): avoid closures being unexported due to `impl Trait`.
#[inline]
pub fn defs(
self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
trait_item_name: Ident,
trait_item_kind: ty::AssociatedKind,
trait_def_id: DefId,
) -> impl Iterator<Item = NodeItem<ty::AssociatedItem>> + Captures<'gcx> + Captures<'tcx> + 'a {
self.flat_map(move |node| {
use ty::AssociatedKind::*;
node.items(tcx).filter(move |impl_item| match (trait_item_kind, impl_item.kind) {
| (Const, Const)
| (Method, Method)
| (Type, Type)
| (Type, Existential)
=> tcx.hygienic_eq(impl_item.ident, trait_item_name, trait_def_id),
| (Const, _)
| (Method, _)
| (Type, _)
| (Existential, _)
=> false,
}).map(move |item| NodeItem { node: node, item: item })
})
}
}
/// Walk up the specialization ancestors of a given impl, starting with that
/// impl itself.
pub fn ancestors(tcx: TyCtxt<'_, '_, '_>,
                 trait_def_id: DefId,
                 start_from_impl: DefId)
                 -> Ancestors {
    Ancestors {
        trait_def_id,
        specialization_graph: tcx.specialization_graph_of(trait_def_id),
        current_source: Some(Node::Impl(start_from_impl)),
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Children {
    /// Hash both impl lists via the shared trait-impl hashing helper.
    /// (A stray extraction-artifact line inside this body was removed.)
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        // Exhaustive destructuring: a compile error here means a new field
        // was added without updating the stable hash.
        let Children {
            ref nonblanket_impls,
            ref blanket_impls,
        } = *self;

        ich::hash_stable_trait_impls(hcx, hasher, blanket_impls, nonblanket_impls);
    }
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.